//===- ASTContext.cpp - Context to hold long-lived AST nodes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/ProfileList.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get comment
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first BaseLocation to first look at the
    // expansion site, the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via macro argument), and
    // always to the declaration itself.
    Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
  }

  return Locations;
}

RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalid.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    return CommentlessRedeclChains.lookup(CanonicalD);
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                       SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (const Decl *D : Decls) {
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    if (DeclRawComments.count(D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, DeclLoc, *CommentsInThisFile)) {
        cacheRawCommentForDecl(*D, *DocComment);
        comments::FullComment *FC = DocComment->parse(*this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
                                              const Decl *D,
                                              const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                               TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
                                          TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
                                         cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                    AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else  // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections.
  if (LangOpts.OpenACC) {
    // FIXME: Once we implement OpenACC array sections in Sema, this will either
    // be combined with the OpenMP type, or given its own type. In the meantime,
    // just use the OpenMP type so that parsing can work.
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(UUD);
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(UUD);
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Inst);
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}

void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}

ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

void ASTContext::getOverriddenMethods(
                      const NamedDecl *D,
                      SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
Overridden.append(OverDecls.begin(), OverDecls.end());
1583 }
1584
1585 void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1586   assert(!Import->getNextLocalImport() &&
1587          "Import declaration already in the chain");
1588   assert(!Import->isFromASTFile() && "Non-local import declaration");
1589   if (!FirstLocalImport) {
1590     FirstLocalImport = Import;
1591     LastLocalImport = Import;
1592     return;
1593   }
1594
1595   LastLocalImport->setNextLocalImport(Import);
1596   LastLocalImport = Import;
1597 }
1598
1599 //===----------------------------------------------------------------------===//
1600 // Type Sizing and Analysis
1601 //===----------------------------------------------------------------------===//
1602
1603 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1604 /// scalar floating point type.
1605 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1606   switch (T->castAs<BuiltinType>()->getKind()) {
1607   default:
1608     llvm_unreachable("Not a floating point type!");
1609   case BuiltinType::BFloat16:
1610     return Target->getBFloat16Format();
1611   case BuiltinType::Float16:
1612     return Target->getHalfFormat();
1613   case BuiltinType::Half:
1614     // For HLSL, when the native half type is disabled, half will be treated
1615     // as float.
1616     if (getLangOpts().HLSL)
1617       if (getLangOpts().NativeHalfType)
1618         return Target->getHalfFormat();
1619       else
1620         return Target->getFloatFormat();
1621     else
1622       return Target->getHalfFormat();
1623   case BuiltinType::Float: return Target->getFloatFormat();
1624   case BuiltinType::Double: return Target->getDoubleFormat();
1625   case BuiltinType::Ibm128:
1626     return Target->getIbm128Format();
1627   case BuiltinType::LongDouble:
1628     if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1629       return AuxTarget->getLongDoubleFormat();
1630     return Target->getLongDoubleFormat();
1631   case BuiltinType::Float128:
1632     if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1633       return AuxTarget->getFloat128Format();
1634     return Target->getFloat128Format();
1635   }
1636 }
1637
1638 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1639   unsigned Align = Target->getCharWidth();
1640
1641   const unsigned AlignFromAttr = D->getMaxAlignment();
1642   if (AlignFromAttr)
1643     Align = AlignFromAttr;
1644
1645   // __attribute__((aligned)) can increase or decrease alignment
1646   // *except* on a struct or struct member, where it only increases
1647   // alignment unless 'packed' is also specified.
1648   //
1649   // It is an error for alignas to decrease alignment, so we can
1650   // ignore that possibility; Sema should diagnose it.
1651   bool UseAlignAttrOnly;
1652   if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
1653     UseAlignAttrOnly =
1654         FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
1655   else
1656     UseAlignAttrOnly = AlignFromAttr != 0;
1657   // If we're using the align attribute only, just ignore everything
1658   // else about the declaration and its type.
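  // Illustrative sketch of that rule (not part of the original logic): given
  //
  //   struct S { char c; int i __attribute__((packed, aligned(1))); };
  //
  // the field 'i' is packed, so only the attribute alignment (1 byte) is used
  // below; without 'packed', the attribute could only raise the field's
  // alignment above that of 'int'.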
1659   if (UseAlignAttrOnly) {
1660     // do nothing
1661   } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1662     QualType T = VD->getType();
1663     if (const auto *RT = T->getAs<ReferenceType>()) {
1664       if (ForAlignof)
1665         T = RT->getPointeeType();
1666       else
1667         T = getPointerType(RT->getPointeeType());
1668     }
1669     QualType BaseT = getBaseElementType(T);
1670     if (T->isFunctionType())
1671       Align = getTypeInfoImpl(T.getTypePtr()).Align;
1672     else if (!BaseT->isIncompleteType()) {
1673       // Adjust alignments of declarations with array type by the
1674       // large-array alignment on the target.
1675       if (const ArrayType *arrayType = getAsArrayType(T)) {
1676         unsigned MinWidth = Target->getLargeArrayMinWidth();
1677         if (!ForAlignof && MinWidth) {
1678           if (isa<VariableArrayType>(arrayType))
1679             Align = std::max(Align, Target->getLargeArrayAlign());
1680           else if (isa<ConstantArrayType>(arrayType) &&
1681                    MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1682             Align = std::max(Align, Target->getLargeArrayAlign());
1683         }
1684       }
1685       Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1686       if (BaseT.getQualifiers().hasUnaligned())
1687         Align = Target->getCharWidth();
1688     }
1689
1690     // Ensure minimum alignment for global variables.
1691     if (const auto *VD = dyn_cast<VarDecl>(D))
1692       if (VD->hasGlobalStorage() && !ForAlignof) {
1693         uint64_t TypeSize =
1694             !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
1695         Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
1696       }
1697
1698     // Fields can be subject to extra alignment constraints, like if
1699     // the field is packed, the struct is packed, or the struct has a
1700     // max-field-alignment constraint (#pragma pack). So calculate
1701     // the actual alignment of the field within the struct, and then
1702     // (as we're expected to) constrain that by the alignment of the type.
1703     if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1704       const RecordDecl *Parent = Field->getParent();
1705       // We can only produce a sensible answer if the record is valid.
1706       if (!Parent->isInvalidDecl()) {
1707         const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1708
1709         // Start with the record's overall alignment.
1710         unsigned FieldAlign = toBits(Layout.getAlignment());
1711
1712         // Use the GCD of that and the offset within the record.
1713         uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1714         if (Offset > 0) {
1715           // Alignment is always a power of 2, so the GCD will be a power of 2,
1716           // which means we get to do this crazy thing instead of Euclid's.
1717           uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1718           if (LowBitOfOffset < FieldAlign)
1719             FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1720         }
1721
1722         Align = std::min(Align, FieldAlign);
1723       }
1724     }
1725   }
1726
1727   // Some targets have a hard limitation on the maximum requestable alignment
1728   // in the aligned attribute for static variables.
1729   const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
1730   const auto *VD = dyn_cast<VarDecl>(D);
1731   if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
1732     Align = std::min(Align, MaxAlignedAttr);
1733
1734   return toCharUnitsFromBits(Align);
1735 }
1736
1737 CharUnits ASTContext::getExnObjectAlignment() const {
1738   return toCharUnitsFromBits(Target->getExnObjectAlignment());
1739 }
1740
1741 // getTypeInfoDataSizeInChars - Return the size of a type, in
1742 // chars. If the type is a record, its data size is returned.
This is 1743 // the size of the memcpy that's performed when assigning this type 1744 // using a trivial copy/move assignment operator. 1745 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1746 TypeInfoChars Info = getTypeInfoInChars(T); 1747 1748 // In C++, objects can sometimes be allocated into the tail padding 1749 // of a base-class subobject. We decide whether that's possible 1750 // during class layout, so here we can just trust the layout results. 1751 if (getLangOpts().CPlusPlus) { 1752 if (const auto *RT = T->getAs<RecordType>()) { 1753 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1754 Info.Width = layout.getDataSize(); 1755 } 1756 } 1757 1758 return Info; 1759 } 1760 1761 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1762 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1763 TypeInfoChars 1764 static getConstantArrayInfoInChars(const ASTContext &Context, 1765 const ConstantArrayType *CAT) { 1766 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1767 uint64_t Size = CAT->getSize().getZExtValue(); 1768 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1769 (uint64_t)(-1)/Size) && 1770 "Overflow in array type char size evaluation"); 1771 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1772 unsigned Align = EltInfo.Align.getQuantity(); 1773 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1774 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1775 Width = llvm::alignTo(Width, Align); 1776 return TypeInfoChars(CharUnits::fromQuantity(Width), 1777 CharUnits::fromQuantity(Align), 1778 EltInfo.AlignRequirement); 1779 } 1780 1781 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1782 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1783 return getConstantArrayInfoInChars(*this, CAT); 1784 TypeInfo Info = getTypeInfo(T); 1785 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1786 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1787 } 1788 1789 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1790 return getTypeInfoInChars(T.getTypePtr()); 1791 } 1792 1793 bool ASTContext::isPromotableIntegerType(QualType T) const { 1794 // HLSL doesn't promote all small integer types to int, it 1795 // just uses the rank-based promotion rules for all types. 1796 if (getLangOpts().HLSL) 1797 return false; 1798 1799 if (const auto *BT = T->getAs<BuiltinType>()) 1800 switch (BT->getKind()) { 1801 case BuiltinType::Bool: 1802 case BuiltinType::Char_S: 1803 case BuiltinType::Char_U: 1804 case BuiltinType::SChar: 1805 case BuiltinType::UChar: 1806 case BuiltinType::Short: 1807 case BuiltinType::UShort: 1808 case BuiltinType::WChar_S: 1809 case BuiltinType::WChar_U: 1810 case BuiltinType::Char8: 1811 case BuiltinType::Char16: 1812 case BuiltinType::Char32: 1813 return true; 1814 default: 1815 return false; 1816 } 1817 1818 // Enumerated types are promotable to their compatible integer types 1819 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). 
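  // For example (illustrative only): this returns true for 'short' and
  // 'char', which promote to 'int', but false for 'int' itself and for a
  // scoped enumeration such as 'enum class E : char', which is rejected by
  // the check below.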
1820 if (const auto *ET = T->getAs<EnumType>()) { 1821 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() || 1822 ET->getDecl()->isScoped()) 1823 return false; 1824 1825 return true; 1826 } 1827 1828 return false; 1829 } 1830 1831 bool ASTContext::isAlignmentRequired(const Type *T) const { 1832 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1833 } 1834 1835 bool ASTContext::isAlignmentRequired(QualType T) const { 1836 return isAlignmentRequired(T.getTypePtr()); 1837 } 1838 1839 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1840 bool NeedsPreferredAlignment) const { 1841 // An alignment on a typedef overrides anything else. 1842 if (const auto *TT = T->getAs<TypedefType>()) 1843 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1844 return Align; 1845 1846 // If we have an (array of) complete type, we're done. 1847 T = getBaseElementType(T); 1848 if (!T->isIncompleteType()) 1849 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1850 1851 // If we had an array type, its element type might be a typedef 1852 // type with an alignment attribute. 1853 if (const auto *TT = T->getAs<TypedefType>()) 1854 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1855 return Align; 1856 1857 // Otherwise, see if the declaration of the type had an attribute. 1858 if (const auto *TT = T->getAs<TagType>()) 1859 return TT->getDecl()->getMaxAlignment(); 1860 1861 return 0; 1862 } 1863 1864 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1865 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1866 if (I != MemoizedTypeInfo.end()) 1867 return I->second; 1868 1869 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1870 TypeInfo TI = getTypeInfoImpl(T); 1871 MemoizedTypeInfo[T] = TI; 1872 return TI; 1873 } 1874 1875 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1876 /// method does not work on incomplete types. 1877 /// 1878 /// FIXME: Pointers into different addr spaces could have different sizes and 1879 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1880 /// should take a QualType, &c. 1881 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1882 uint64_t Width = 0; 1883 unsigned Align = 8; 1884 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1885 LangAS AS = LangAS::Default; 1886 switch (T->getTypeClass()) { 1887 #define TYPE(Class, Base) 1888 #define ABSTRACT_TYPE(Class, Base) 1889 #define NON_CANONICAL_TYPE(Class, Base) 1890 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1891 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1892 case Type::Class: \ 1893 assert(!T->isDependentType() && "should not see dependent types here"); \ 1894 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1895 #include "clang/AST/TypeNodes.inc" 1896 llvm_unreachable("Should not see dependent types"); 1897 1898 case Type::FunctionNoProto: 1899 case Type::FunctionProto: 1900 // GCC extension: alignof(function) = 32 bits 1901 Width = 0; 1902 Align = 32; 1903 break; 1904 1905 case Type::IncompleteArray: 1906 case Type::VariableArray: 1907 case Type::ConstantArray: { 1908 // Model non-constant sized arrays as size zero, but track the alignment. 
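    // Rough worked example (assumes a target where 'int' is 32 bits wide):
    //   int a[10];  // ConstantArray: Width = 32 * 10 = 320 bits, Align = 32
    //   int a[n];   // VariableArray: Width stays 0, Align = 32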
1909 uint64_t Size = 0; 1910 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1911 Size = CAT->getSize().getZExtValue(); 1912 1913 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1914 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1915 "Overflow in array type bit size evaluation"); 1916 Width = EltInfo.Width * Size; 1917 Align = EltInfo.Align; 1918 AlignRequirement = EltInfo.AlignRequirement; 1919 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1920 getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1921 Width = llvm::alignTo(Width, Align); 1922 break; 1923 } 1924 1925 case Type::ExtVector: 1926 case Type::Vector: { 1927 const auto *VT = cast<VectorType>(T); 1928 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1929 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 1930 : EltInfo.Width * VT->getNumElements(); 1931 // Enforce at least byte size and alignment. 1932 Width = std::max<unsigned>(8, Width); 1933 Align = std::max<unsigned>(8, Width); 1934 1935 // If the alignment is not a power of 2, round up to the next power of 2. 1936 // This happens for non-power-of-2 length vectors. 1937 if (Align & (Align-1)) { 1938 Align = llvm::bit_ceil(Align); 1939 Width = llvm::alignTo(Width, Align); 1940 } 1941 // Adjust the alignment based on the target max. 1942 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1943 if (TargetVectorAlign && TargetVectorAlign < Align) 1944 Align = TargetVectorAlign; 1945 if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 1946 // Adjust the alignment for fixed-length SVE vectors. This is important 1947 // for non-power-of-2 vector lengths. 1948 Align = 128; 1949 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 1950 // Adjust the alignment for fixed-length SVE predicates. 1951 Align = 16; 1952 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData || 1953 VT->getVectorKind() == VectorKind::RVVFixedLengthMask) 1954 // Adjust the alignment for fixed-length RVV vectors. 1955 Align = std::min<unsigned>(64, Width); 1956 break; 1957 } 1958 1959 case Type::ConstantMatrix: { 1960 const auto *MT = cast<ConstantMatrixType>(T); 1961 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 1962 // The internal layout of a matrix value is implementation defined. 1963 // Initially be ABI compatible with arrays with respect to alignment and 1964 // size. 1965 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 1966 Align = ElementInfo.Align; 1967 break; 1968 } 1969 1970 case Type::Builtin: 1971 switch (cast<BuiltinType>(T)->getKind()) { 1972 default: llvm_unreachable("Unknown builtin type!"); 1973 case BuiltinType::Void: 1974 // GCC extension: alignof(void) = 8 bits. 
1975 Width = 0; 1976 Align = 8; 1977 break; 1978 case BuiltinType::Bool: 1979 Width = Target->getBoolWidth(); 1980 Align = Target->getBoolAlign(); 1981 break; 1982 case BuiltinType::Char_S: 1983 case BuiltinType::Char_U: 1984 case BuiltinType::UChar: 1985 case BuiltinType::SChar: 1986 case BuiltinType::Char8: 1987 Width = Target->getCharWidth(); 1988 Align = Target->getCharAlign(); 1989 break; 1990 case BuiltinType::WChar_S: 1991 case BuiltinType::WChar_U: 1992 Width = Target->getWCharWidth(); 1993 Align = Target->getWCharAlign(); 1994 break; 1995 case BuiltinType::Char16: 1996 Width = Target->getChar16Width(); 1997 Align = Target->getChar16Align(); 1998 break; 1999 case BuiltinType::Char32: 2000 Width = Target->getChar32Width(); 2001 Align = Target->getChar32Align(); 2002 break; 2003 case BuiltinType::UShort: 2004 case BuiltinType::Short: 2005 Width = Target->getShortWidth(); 2006 Align = Target->getShortAlign(); 2007 break; 2008 case BuiltinType::UInt: 2009 case BuiltinType::Int: 2010 Width = Target->getIntWidth(); 2011 Align = Target->getIntAlign(); 2012 break; 2013 case BuiltinType::ULong: 2014 case BuiltinType::Long: 2015 Width = Target->getLongWidth(); 2016 Align = Target->getLongAlign(); 2017 break; 2018 case BuiltinType::ULongLong: 2019 case BuiltinType::LongLong: 2020 Width = Target->getLongLongWidth(); 2021 Align = Target->getLongLongAlign(); 2022 break; 2023 case BuiltinType::Int128: 2024 case BuiltinType::UInt128: 2025 Width = 128; 2026 Align = Target->getInt128Align(); 2027 break; 2028 case BuiltinType::ShortAccum: 2029 case BuiltinType::UShortAccum: 2030 case BuiltinType::SatShortAccum: 2031 case BuiltinType::SatUShortAccum: 2032 Width = Target->getShortAccumWidth(); 2033 Align = Target->getShortAccumAlign(); 2034 break; 2035 case BuiltinType::Accum: 2036 case BuiltinType::UAccum: 2037 case BuiltinType::SatAccum: 2038 case BuiltinType::SatUAccum: 2039 Width = Target->getAccumWidth(); 2040 Align = Target->getAccumAlign(); 2041 break; 2042 case BuiltinType::LongAccum: 2043 case BuiltinType::ULongAccum: 2044 case BuiltinType::SatLongAccum: 2045 case BuiltinType::SatULongAccum: 2046 Width = Target->getLongAccumWidth(); 2047 Align = Target->getLongAccumAlign(); 2048 break; 2049 case BuiltinType::ShortFract: 2050 case BuiltinType::UShortFract: 2051 case BuiltinType::SatShortFract: 2052 case BuiltinType::SatUShortFract: 2053 Width = Target->getShortFractWidth(); 2054 Align = Target->getShortFractAlign(); 2055 break; 2056 case BuiltinType::Fract: 2057 case BuiltinType::UFract: 2058 case BuiltinType::SatFract: 2059 case BuiltinType::SatUFract: 2060 Width = Target->getFractWidth(); 2061 Align = Target->getFractAlign(); 2062 break; 2063 case BuiltinType::LongFract: 2064 case BuiltinType::ULongFract: 2065 case BuiltinType::SatLongFract: 2066 case BuiltinType::SatULongFract: 2067 Width = Target->getLongFractWidth(); 2068 Align = Target->getLongFractAlign(); 2069 break; 2070 case BuiltinType::BFloat16: 2071 if (Target->hasBFloat16Type()) { 2072 Width = Target->getBFloat16Width(); 2073 Align = Target->getBFloat16Align(); 2074 } else if ((getLangOpts().SYCLIsDevice || 2075 (getLangOpts().OpenMP && 2076 getLangOpts().OpenMPIsTargetDevice)) && 2077 AuxTarget->hasBFloat16Type()) { 2078 Width = AuxTarget->getBFloat16Width(); 2079 Align = AuxTarget->getBFloat16Align(); 2080 } 2081 break; 2082 case BuiltinType::Float16: 2083 case BuiltinType::Half: 2084 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2085 !getLangOpts().OpenMPIsTargetDevice) { 2086 Width = Target->getHalfWidth(); 2087 
Align = Target->getHalfAlign(); 2088 } else { 2089 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2090 "Expected OpenMP device compilation."); 2091 Width = AuxTarget->getHalfWidth(); 2092 Align = AuxTarget->getHalfAlign(); 2093 } 2094 break; 2095 case BuiltinType::Float: 2096 Width = Target->getFloatWidth(); 2097 Align = Target->getFloatAlign(); 2098 break; 2099 case BuiltinType::Double: 2100 Width = Target->getDoubleWidth(); 2101 Align = Target->getDoubleAlign(); 2102 break; 2103 case BuiltinType::Ibm128: 2104 Width = Target->getIbm128Width(); 2105 Align = Target->getIbm128Align(); 2106 break; 2107 case BuiltinType::LongDouble: 2108 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2109 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2110 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2111 Width = AuxTarget->getLongDoubleWidth(); 2112 Align = AuxTarget->getLongDoubleAlign(); 2113 } else { 2114 Width = Target->getLongDoubleWidth(); 2115 Align = Target->getLongDoubleAlign(); 2116 } 2117 break; 2118 case BuiltinType::Float128: 2119 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2120 !getLangOpts().OpenMPIsTargetDevice) { 2121 Width = Target->getFloat128Width(); 2122 Align = Target->getFloat128Align(); 2123 } else { 2124 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2125 "Expected OpenMP device compilation."); 2126 Width = AuxTarget->getFloat128Width(); 2127 Align = AuxTarget->getFloat128Align(); 2128 } 2129 break; 2130 case BuiltinType::NullPtr: 2131 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*) 2132 Width = Target->getPointerWidth(LangAS::Default); 2133 Align = Target->getPointerAlign(LangAS::Default); 2134 break; 2135 case BuiltinType::ObjCId: 2136 case BuiltinType::ObjCClass: 2137 case BuiltinType::ObjCSel: 2138 Width = Target->getPointerWidth(LangAS::Default); 2139 Align = Target->getPointerAlign(LangAS::Default); 2140 break; 2141 case BuiltinType::OCLSampler: 2142 case BuiltinType::OCLEvent: 2143 case BuiltinType::OCLClkEvent: 2144 case BuiltinType::OCLQueue: 2145 case BuiltinType::OCLReserveID: 2146 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2147 case BuiltinType::Id: 2148 #include "clang/Basic/OpenCLImageTypes.def" 2149 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2150 case BuiltinType::Id: 2151 #include "clang/Basic/OpenCLExtensionTypes.def" 2152 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 2153 Width = Target->getPointerWidth(AS); 2154 Align = Target->getPointerAlign(AS); 2155 break; 2156 // The SVE types are effectively target-specific. The length of an 2157 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2158 // of 128 bits. There is one predicate bit for each vector byte, so the 2159 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2160 // 2161 // Because the length is only known at runtime, we use a dummy value 2162 // of 0 for the static length. The alignment values are those defined 2163 // by the Procedure Call Standard for the Arm Architecture. 
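    // For instance (illustrative): __SVInt32_t, an SVE_VECTOR_TYPE, gets
    // Width = 0 and Align = 128 from the expansion below, while __SVBool_t,
    // an SVE_PREDICATE_TYPE, gets Width = 0 and Align = 16.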
2164 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2165 IsSigned, IsFP, IsBF) \ 2166 case BuiltinType::Id: \ 2167 Width = 0; \ 2168 Align = 128; \ 2169 break; 2170 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2171 case BuiltinType::Id: \ 2172 Width = 0; \ 2173 Align = 16; \ 2174 break; 2175 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \ 2176 case BuiltinType::Id: \ 2177 Width = 0; \ 2178 Align = 16; \ 2179 break; 2180 #include "clang/Basic/AArch64SVEACLETypes.def" 2181 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2182 case BuiltinType::Id: \ 2183 Width = Size; \ 2184 Align = Size; \ 2185 break; 2186 #include "clang/Basic/PPCTypes.def" 2187 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2188 IsFP, IsBF) \ 2189 case BuiltinType::Id: \ 2190 Width = 0; \ 2191 Align = ElBits; \ 2192 break; 2193 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2194 case BuiltinType::Id: \ 2195 Width = 0; \ 2196 Align = 8; \ 2197 break; 2198 #include "clang/Basic/RISCVVTypes.def" 2199 #define WASM_TYPE(Name, Id, SingletonId) \ 2200 case BuiltinType::Id: \ 2201 Width = 0; \ 2202 Align = 8; \ 2203 break; 2204 #include "clang/Basic/WebAssemblyReferenceTypes.def" 2205 } 2206 break; 2207 case Type::ObjCObjectPointer: 2208 Width = Target->getPointerWidth(LangAS::Default); 2209 Align = Target->getPointerAlign(LangAS::Default); 2210 break; 2211 case Type::BlockPointer: 2212 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace(); 2213 Width = Target->getPointerWidth(AS); 2214 Align = Target->getPointerAlign(AS); 2215 break; 2216 case Type::LValueReference: 2217 case Type::RValueReference: 2218 // alignof and sizeof should never enter this code path here, so we go 2219 // the pointer route. 2220 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace(); 2221 Width = Target->getPointerWidth(AS); 2222 Align = Target->getPointerAlign(AS); 2223 break; 2224 case Type::Pointer: 2225 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace(); 2226 Width = Target->getPointerWidth(AS); 2227 Align = Target->getPointerAlign(AS); 2228 break; 2229 case Type::MemberPointer: { 2230 const auto *MPT = cast<MemberPointerType>(T); 2231 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2232 Width = MPI.Width; 2233 Align = MPI.Align; 2234 break; 2235 } 2236 case Type::Complex: { 2237 // Complex types have the same alignment as their elements, but twice the 2238 // size. 
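    // e.g. (illustrative) _Complex double on a target with a 64-bit double:
    //   Width = 2 * 64 = 128 bits, Align = alignof(double).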
2239 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2240 Width = EltInfo.Width * 2; 2241 Align = EltInfo.Align; 2242 break; 2243 } 2244 case Type::ObjCObject: 2245 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2246 case Type::Adjusted: 2247 case Type::Decayed: 2248 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2249 case Type::ObjCInterface: { 2250 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2251 if (ObjCI->getDecl()->isInvalidDecl()) { 2252 Width = 8; 2253 Align = 8; 2254 break; 2255 } 2256 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2257 Width = toBits(Layout.getSize()); 2258 Align = toBits(Layout.getAlignment()); 2259 break; 2260 } 2261 case Type::BitInt: { 2262 const auto *EIT = cast<BitIntType>(T); 2263 Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()), 2264 getCharWidth(), Target->getLongLongAlign()); 2265 Width = llvm::alignTo(EIT->getNumBits(), Align); 2266 break; 2267 } 2268 case Type::Record: 2269 case Type::Enum: { 2270 const auto *TT = cast<TagType>(T); 2271 2272 if (TT->getDecl()->isInvalidDecl()) { 2273 Width = 8; 2274 Align = 8; 2275 break; 2276 } 2277 2278 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2279 const EnumDecl *ED = ET->getDecl(); 2280 TypeInfo Info = 2281 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2282 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2283 Info.Align = AttrAlign; 2284 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2285 } 2286 return Info; 2287 } 2288 2289 const auto *RT = cast<RecordType>(TT); 2290 const RecordDecl *RD = RT->getDecl(); 2291 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2292 Width = toBits(Layout.getSize()); 2293 Align = toBits(Layout.getAlignment()); 2294 AlignRequirement = RD->hasAttr<AlignedAttr>() 2295 ? AlignRequirementKind::RequiredByRecord 2296 : AlignRequirementKind::None; 2297 break; 2298 } 2299 2300 case Type::SubstTemplateTypeParm: 2301 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2302 getReplacementType().getTypePtr()); 2303 2304 case Type::Auto: 2305 case Type::DeducedTemplateSpecialization: { 2306 const auto *A = cast<DeducedType>(T); 2307 assert(!A->getDeducedType().isNull() && 2308 "cannot request the size of an undeduced or dependent auto type"); 2309 return getTypeInfo(A->getDeducedType().getTypePtr()); 2310 } 2311 2312 case Type::Paren: 2313 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2314 2315 case Type::MacroQualified: 2316 return getTypeInfo( 2317 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2318 2319 case Type::ObjCTypeParam: 2320 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2321 2322 case Type::Using: 2323 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2324 2325 case Type::Typedef: { 2326 const auto *TT = cast<TypedefType>(T); 2327 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr()); 2328 // If the typedef has an aligned attribute on it, it overrides any computed 2329 // alignment we have. This violates the GCC documentation (which says that 2330 // attribute(aligned) can only round up) but matches its implementation. 
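    // Illustrative example of the case handled below:
    //   typedef double D2 __attribute__((aligned(2)));
    // The attribute alignment (16 bits) replaces the computed alignment of
    // 'double' even though it is smaller.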
2331     if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2332       Align = AttrAlign;
2333       AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2334     } else {
2335       Align = Info.Align;
2336       AlignRequirement = Info.AlignRequirement;
2337     }
2338     Width = Info.Width;
2339     break;
2340   }
2341
2342   case Type::Elaborated:
2343     return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2344
2345   case Type::Attributed:
2346     return getTypeInfo(
2347         cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2348
2349   case Type::BTFTagAttributed:
2350     return getTypeInfo(
2351         cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2352
2353   case Type::Atomic: {
2354     // Start with the base type information.
2355     TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2356     Width = Info.Width;
2357     Align = Info.Align;
2358
2359     if (!Width) {
2360       // An otherwise zero-sized type should still generate an
2361       // atomic operation.
2362       Width = Target->getCharWidth();
2363       assert(Align);
2364     } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2365       // If the size of the type doesn't exceed the platform's max
2366       // atomic promotion width, make the size and alignment more
2367       // favorable to atomic operations:
2368
2369       // Round the size up to a power of 2.
2370       Width = llvm::bit_ceil(Width);
2371
2372       // Set the alignment equal to the size.
2373       Align = static_cast<unsigned>(Width);
2374     }
2375   }
2376   break;
2377
2378   case Type::Pipe:
2379     Width = Target->getPointerWidth(LangAS::opencl_global);
2380     Align = Target->getPointerAlign(LangAS::opencl_global);
2381     break;
2382   }
2383
2384   assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2385   return TypeInfo(Width, Align, AlignRequirement);
2386 }
2387
2388 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2389   UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2390   if (I != MemoizedUnadjustedAlign.end())
2391     return I->second;
2392
2393   unsigned UnadjustedAlign;
2394   if (const auto *RT = T->getAs<RecordType>()) {
2395     const RecordDecl *RD = RT->getDecl();
2396     const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2397     UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2398   } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2399     const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2400     UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2401   } else {
2402     UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2403   }
2404
2405   MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2406   return UnadjustedAlign;
2407 }
2408
2409 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2410   unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2411       getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
2412   return SimdAlign;
2413 }
2414
2415 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2416 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2417   return CharUnits::fromQuantity(BitSize / getCharWidth());
2418 }
2419
2420 /// toBits - Convert a size in characters to a size in bits.
2421 int64_t ASTContext::toBits(CharUnits CharSize) const {
2422   return CharSize.getQuantity() * getCharWidth();
2423 }
2424
2425 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2426 /// This method does not work on incomplete types.
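/// For example (illustrative): on a target where 'int' is 32 bits wide and a
/// char is 8 bits, getTypeSizeInChars(IntTy) yields CharUnits::fromQuantity(4),
/// i.e. toCharUnitsFromBits(getTypeSize(IntTy)).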
2427 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2428 return getTypeInfoInChars(T).Width; 2429 } 2430 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2431 return getTypeInfoInChars(T).Width; 2432 } 2433 2434 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2435 /// characters. This method does not work on incomplete types. 2436 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2437 return toCharUnitsFromBits(getTypeAlign(T)); 2438 } 2439 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2440 return toCharUnitsFromBits(getTypeAlign(T)); 2441 } 2442 2443 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2444 /// type, in characters, before alignment adjustments. This method does 2445 /// not work on incomplete types. 2446 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2447 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2448 } 2449 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2450 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2451 } 2452 2453 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2454 /// type for the current target in bits. This can be different than the ABI 2455 /// alignment in cases where it is beneficial for performance or backwards 2456 /// compatibility preserving to overalign a data type. (Note: despite the name, 2457 /// the preferred alignment is ABI-impacting, and not an optimization.) 2458 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { 2459 TypeInfo TI = getTypeInfo(T); 2460 unsigned ABIAlign = TI.Align; 2461 2462 T = T->getBaseElementTypeUnsafe(); 2463 2464 // The preferred alignment of member pointers is that of a pointer. 2465 if (T->isMemberPointerType()) 2466 return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); 2467 2468 if (!Target->allowsLargerPreferedTypeAlignment()) 2469 return ABIAlign; 2470 2471 if (const auto *RT = T->getAs<RecordType>()) { 2472 const RecordDecl *RD = RT->getDecl(); 2473 2474 // When used as part of a typedef, or together with a 'packed' attribute, 2475 // the 'aligned' attribute can be used to decrease alignment. Note that the 2476 // 'packed' case is already taken into consideration when computing the 2477 // alignment, we only need to handle the typedef case here. 2478 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef || 2479 RD->isInvalidDecl()) 2480 return ABIAlign; 2481 2482 unsigned PreferredAlign = static_cast<unsigned>( 2483 toBits(getASTRecordLayout(RD).PreferredAlignment)); 2484 assert(PreferredAlign >= ABIAlign && 2485 "PreferredAlign should be at least as large as ABIAlign."); 2486 return PreferredAlign; 2487 } 2488 2489 // Double (and, for targets supporting AIX `power` alignment, long double) and 2490 // long long should be naturally aligned (despite requiring less alignment) if 2491 // possible. 2492 if (const auto *CT = T->getAs<ComplexType>()) 2493 T = CT->getElementType().getTypePtr(); 2494 if (const auto *ET = T->getAs<EnumType>()) 2495 T = ET->getDecl()->getIntegerType().getTypePtr(); 2496 if (T->isSpecificBuiltinType(BuiltinType::Double) || 2497 T->isSpecificBuiltinType(BuiltinType::LongLong) || 2498 T->isSpecificBuiltinType(BuiltinType::ULongLong) || 2499 (T->isSpecificBuiltinType(BuiltinType::LongDouble) && 2500 Target->defaultsToAIXPowerAlignment())) 2501 // Don't increase the alignment if an alignment attribute was specified on a 2502 // typedef declaration. 
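  // Concrete illustration: on 32-bit x86 the ABI alignment of 'double' is
  // 32 bits, but (absent an aligned typedef) this returns the natural 64-bit
  // alignment, i.e. std::max(ABIAlign, getTypeSize(T)) == 64.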
2503 if (!TI.isAlignRequired()) 2504 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2505 2506 return ABIAlign; 2507 } 2508 2509 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2510 /// for __attribute__((aligned)) on this target, to be used if no alignment 2511 /// value is specified. 2512 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2513 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2514 } 2515 2516 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2517 /// to a global variable of the specified type. 2518 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2519 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2520 return std::max(getPreferredTypeAlign(T), 2521 getTargetInfo().getMinGlobalAlign(TypeSize)); 2522 } 2523 2524 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2525 /// should be given to a global variable of the specified type. 2526 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2527 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2528 } 2529 2530 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2531 CharUnits Offset = CharUnits::Zero(); 2532 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2533 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2534 Offset += Layout->getBaseClassOffset(Base); 2535 Layout = &getASTRecordLayout(Base); 2536 } 2537 return Offset; 2538 } 2539 2540 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2541 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2542 CharUnits ThisAdjustment = CharUnits::Zero(); 2543 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2544 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2545 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2546 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2547 const CXXRecordDecl *Base = RD; 2548 const CXXRecordDecl *Derived = Path[I]; 2549 if (DerivedMember) 2550 std::swap(Base, Derived); 2551 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2552 RD = Path[I]; 2553 } 2554 if (DerivedMember) 2555 ThisAdjustment = -ThisAdjustment; 2556 return ThisAdjustment; 2557 } 2558 2559 /// DeepCollectObjCIvars - 2560 /// This routine first collects all declared, but not synthesized, ivars in 2561 /// super class and then collects all ivars, including those synthesized for 2562 /// current class. This routine is used for implementation of current class 2563 /// when all ivars, declared and synthesized are known. 2564 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2565 bool leafClass, 2566 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2567 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2568 DeepCollectObjCIvars(SuperClass, false, Ivars); 2569 if (!leafClass) { 2570 llvm::append_range(Ivars, OI->ivars()); 2571 } else { 2572 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2573 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2574 Iv= Iv->getNextIvar()) 2575 Ivars.push_back(Iv); 2576 } 2577 } 2578 2579 /// CollectInheritedProtocols - Collect all protocols in current class and 2580 /// those inherited by it. 
2581 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2582 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2583 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2584 // We can use protocol_iterator here instead of 2585 // all_referenced_protocol_iterator since we are walking all categories. 2586 for (auto *Proto : OI->all_referenced_protocols()) { 2587 CollectInheritedProtocols(Proto, Protocols); 2588 } 2589 2590 // Categories of this Interface. 2591 for (const auto *Cat : OI->visible_categories()) 2592 CollectInheritedProtocols(Cat, Protocols); 2593 2594 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2595 while (SD) { 2596 CollectInheritedProtocols(SD, Protocols); 2597 SD = SD->getSuperClass(); 2598 } 2599 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2600 for (auto *Proto : OC->protocols()) { 2601 CollectInheritedProtocols(Proto, Protocols); 2602 } 2603 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2604 // Insert the protocol. 2605 if (!Protocols.insert( 2606 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2607 return; 2608 2609 for (auto *Proto : OP->protocols()) 2610 CollectInheritedProtocols(Proto, Protocols); 2611 } 2612 } 2613 2614 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2615 const RecordDecl *RD, 2616 bool CheckIfTriviallyCopyable) { 2617 assert(RD->isUnion() && "Must be union type"); 2618 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2619 2620 for (const auto *Field : RD->fields()) { 2621 if (!Context.hasUniqueObjectRepresentations(Field->getType(), 2622 CheckIfTriviallyCopyable)) 2623 return false; 2624 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2625 if (FieldSize != UnionSize) 2626 return false; 2627 } 2628 return !RD->field_empty(); 2629 } 2630 2631 static int64_t getSubobjectOffset(const FieldDecl *Field, 2632 const ASTContext &Context, 2633 const clang::ASTRecordLayout & /*Layout*/) { 2634 return Context.getFieldOffset(Field); 2635 } 2636 2637 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2638 const ASTContext &Context, 2639 const clang::ASTRecordLayout &Layout) { 2640 return Context.toBits(Layout.getBaseClassOffset(RD)); 2641 } 2642 2643 static std::optional<int64_t> 2644 structHasUniqueObjectRepresentations(const ASTContext &Context, 2645 const RecordDecl *RD, 2646 bool CheckIfTriviallyCopyable); 2647 2648 static std::optional<int64_t> 2649 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, 2650 bool CheckIfTriviallyCopyable) { 2651 if (Field->getType()->isRecordType()) { 2652 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2653 if (!RD->isUnion()) 2654 return structHasUniqueObjectRepresentations(Context, RD, 2655 CheckIfTriviallyCopyable); 2656 } 2657 2658 // A _BitInt type may not be unique if it has padding bits 2659 // but if it is a bitfield the padding bits are not used. 2660 bool IsBitIntType = Field->getType()->isBitIntType(); 2661 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2662 !Context.hasUniqueObjectRepresentations(Field->getType(), 2663 CheckIfTriviallyCopyable)) 2664 return std::nullopt; 2665 2666 int64_t FieldSizeInBits = 2667 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2668 if (Field->isBitField()) { 2669 // If we have explicit padding bits, they don't contribute bits 2670 // to the actual object representation, so return 0. 
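    // e.g. (illustrative) in 'struct S { int a : 8; int : 24; };' the unnamed
    // 24-bit member is pure padding and is counted as 0 bits here.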
2671 if (Field->isUnnamedBitfield()) 2672 return 0; 2673 2674 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2675 if (IsBitIntType) { 2676 if ((unsigned)BitfieldSize > 2677 cast<BitIntType>(Field->getType())->getNumBits()) 2678 return std::nullopt; 2679 } else if (BitfieldSize > FieldSizeInBits) { 2680 return std::nullopt; 2681 } 2682 FieldSizeInBits = BitfieldSize; 2683 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations( 2684 Field->getType(), CheckIfTriviallyCopyable)) { 2685 return std::nullopt; 2686 } 2687 return FieldSizeInBits; 2688 } 2689 2690 static std::optional<int64_t> 2691 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context, 2692 bool CheckIfTriviallyCopyable) { 2693 return structHasUniqueObjectRepresentations(Context, RD, 2694 CheckIfTriviallyCopyable); 2695 } 2696 2697 template <typename RangeT> 2698 static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2699 const RangeT &Subobjects, int64_t CurOffsetInBits, 2700 const ASTContext &Context, const clang::ASTRecordLayout &Layout, 2701 bool CheckIfTriviallyCopyable) { 2702 for (const auto *Subobject : Subobjects) { 2703 std::optional<int64_t> SizeInBits = 2704 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable); 2705 if (!SizeInBits) 2706 return std::nullopt; 2707 if (*SizeInBits != 0) { 2708 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2709 if (Offset != CurOffsetInBits) 2710 return std::nullopt; 2711 CurOffsetInBits += *SizeInBits; 2712 } 2713 } 2714 return CurOffsetInBits; 2715 } 2716 2717 static std::optional<int64_t> 2718 structHasUniqueObjectRepresentations(const ASTContext &Context, 2719 const RecordDecl *RD, 2720 bool CheckIfTriviallyCopyable) { 2721 assert(!RD->isUnion() && "Must be struct/class type"); 2722 const auto &Layout = Context.getASTRecordLayout(RD); 2723 2724 int64_t CurOffsetInBits = 0; 2725 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2726 if (ClassDecl->isDynamicClass()) 2727 return std::nullopt; 2728 2729 SmallVector<CXXRecordDecl *, 4> Bases; 2730 for (const auto &Base : ClassDecl->bases()) { 2731 // Empty types can be inherited from, and non-empty types can potentially 2732 // have tail padding, so just make sure there isn't an error. 
2733 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2734 } 2735 2736 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2737 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2738 }); 2739 2740 std::optional<int64_t> OffsetAfterBases = 2741 structSubobjectsHaveUniqueObjectRepresentations( 2742 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable); 2743 if (!OffsetAfterBases) 2744 return std::nullopt; 2745 CurOffsetInBits = *OffsetAfterBases; 2746 } 2747 2748 std::optional<int64_t> OffsetAfterFields = 2749 structSubobjectsHaveUniqueObjectRepresentations( 2750 RD->fields(), CurOffsetInBits, Context, Layout, 2751 CheckIfTriviallyCopyable); 2752 if (!OffsetAfterFields) 2753 return std::nullopt; 2754 CurOffsetInBits = *OffsetAfterFields; 2755 2756 return CurOffsetInBits; 2757 } 2758 2759 bool ASTContext::hasUniqueObjectRepresentations( 2760 QualType Ty, bool CheckIfTriviallyCopyable) const { 2761 // C++17 [meta.unary.prop]: 2762 // The predicate condition for a template specialization 2763 // has_unique_object_representations<T> shall be satisfied if and only if: 2764 // (9.1) - T is trivially copyable, and 2765 // (9.2) - any two objects of type T with the same value have the same 2766 // object representation, where: 2767 // - two objects of array or non-union class type are considered to have 2768 // the same value if their respective sequences of direct subobjects 2769 // have the same values, and 2770 // - two objects of union type are considered to have the same value if 2771 // they have the same active member and the corresponding members have 2772 // the same value. 2773 // The set of scalar types for which this condition holds is 2774 // implementation-defined. [ Note: If a type has padding bits, the condition 2775 // does not hold; otherwise, the condition holds true for unsigned integral 2776 // types. -- end note ] 2777 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2778 2779 // Arrays are unique only if their element type is unique. 2780 if (Ty->isArrayType()) 2781 return hasUniqueObjectRepresentations(getBaseElementType(Ty), 2782 CheckIfTriviallyCopyable); 2783 2784 // (9.1) - T is trivially copyable... 2785 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this)) 2786 return false; 2787 2788 // All integrals and enums are unique. 2789 if (Ty->isIntegralOrEnumerationType()) { 2790 // Except _BitInt types that have padding bits. 2791 if (const auto *BIT = Ty->getAs<BitIntType>()) 2792 return getTypeSize(BIT) == BIT->getNumBits(); 2793 2794 return true; 2795 } 2796 2797 // All other pointers are unique. 
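  // (Illustrative data points: 'unsigned int' and plain object pointers
  // satisfy this predicate, while a struct with interior padding, e.g.
  // 'struct P { char c; int i; };' on a typical target, does not: the record
  // path below rejects it because of the padding bytes after 'c'.)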
2798 if (Ty->isPointerType()) 2799 return true; 2800 2801 if (const auto *MPT = Ty->getAs<MemberPointerType>()) 2802 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2803 2804 if (Ty->isRecordType()) { 2805 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2806 2807 if (Record->isInvalidDecl()) 2808 return false; 2809 2810 if (Record->isUnion()) 2811 return unionHasUniqueObjectRepresentations(*this, Record, 2812 CheckIfTriviallyCopyable); 2813 2814 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations( 2815 *this, Record, CheckIfTriviallyCopyable); 2816 2817 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2818 } 2819 2820 // FIXME: More cases to handle here (list by rsmith): 2821 // vectors (careful about, eg, vector of 3 foo) 2822 // _Complex int and friends 2823 // _Atomic T 2824 // Obj-C block pointers 2825 // Obj-C object pointers 2826 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2827 // clk_event_t, queue_t, reserve_id_t) 2828 // There're also Obj-C class types and the Obj-C selector type, but I think it 2829 // makes sense for those to return false here. 2830 2831 return false; 2832 } 2833 2834 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2835 unsigned count = 0; 2836 // Count ivars declared in class extension. 2837 for (const auto *Ext : OI->known_extensions()) 2838 count += Ext->ivar_size(); 2839 2840 // Count ivar defined in this class's implementation. This 2841 // includes synthesized ivars. 2842 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2843 count += ImplDecl->ivar_size(); 2844 2845 return count; 2846 } 2847 2848 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2849 if (!E) 2850 return false; 2851 2852 // nullptr_t is always treated as null. 2853 if (E->getType()->isNullPtrType()) return true; 2854 2855 if (E->getType()->isAnyPointerType() && 2856 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2857 Expr::NPC_ValueDependentIsNull)) 2858 return true; 2859 2860 // Unfortunately, __null has type 'int'. 2861 if (isa<GNUNullExpr>(E)) return true; 2862 2863 return false; 2864 } 2865 2866 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2867 /// exists. 2868 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2869 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2870 I = ObjCImpls.find(D); 2871 if (I != ObjCImpls.end()) 2872 return cast<ObjCImplementationDecl>(I->second); 2873 return nullptr; 2874 } 2875 2876 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2877 /// exists. 2878 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2879 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2880 I = ObjCImpls.find(D); 2881 if (I != ObjCImpls.end()) 2882 return cast<ObjCCategoryImplDecl>(I->second); 2883 return nullptr; 2884 } 2885 2886 /// Set the implementation of ObjCInterfaceDecl. 2887 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2888 ObjCImplementationDecl *ImplD) { 2889 assert(IFaceD && ImplD && "Passed null params"); 2890 ObjCImpls[IFaceD] = ImplD; 2891 } 2892 2893 /// Set the implementation of ObjCCategoryDecl. 
2894 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2895 ObjCCategoryImplDecl *ImplD) { 2896 assert(CatD && ImplD && "Passed null params"); 2897 ObjCImpls[CatD] = ImplD; 2898 } 2899 2900 const ObjCMethodDecl * 2901 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2902 return ObjCMethodRedecls.lookup(MD); 2903 } 2904 2905 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2906 const ObjCMethodDecl *Redecl) { 2907 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2908 ObjCMethodRedecls[MD] = Redecl; 2909 } 2910 2911 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2912 const NamedDecl *ND) const { 2913 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2914 return ID; 2915 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2916 return CD->getClassInterface(); 2917 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2918 return IMD->getClassInterface(); 2919 2920 return nullptr; 2921 } 2922 2923 /// Get the copy initialization expression of VarDecl, or nullptr if 2924 /// none exists. 2925 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2926 assert(VD && "Passed null params"); 2927 assert(VD->hasAttr<BlocksAttr>() && 2928 "getBlockVarCopyInits - not __block var"); 2929 auto I = BlockVarCopyInits.find(VD); 2930 if (I != BlockVarCopyInits.end()) 2931 return I->second; 2932 return {nullptr, false}; 2933 } 2934 2935 /// Set the copy initialization expression of a block var decl. 2936 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2937 bool CanThrow) { 2938 assert(VD && CopyExpr && "Passed null params"); 2939 assert(VD->hasAttr<BlocksAttr>() && 2940 "setBlockVarCopyInits - not __block var"); 2941 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2942 } 2943 2944 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2945 unsigned DataSize) const { 2946 if (!DataSize) 2947 DataSize = TypeLoc::getFullDataSizeForType(T); 2948 else 2949 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2950 "incorrect data size provided to CreateTypeSourceInfo!"); 2951 2952 auto *TInfo = 2953 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2954 new (TInfo) TypeSourceInfo(T, DataSize); 2955 return TInfo; 2956 } 2957 2958 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2959 SourceLocation L) const { 2960 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2961 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2962 return DI; 2963 } 2964 2965 const ASTRecordLayout & 2966 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2967 return getObjCLayout(D, nullptr); 2968 } 2969 2970 const ASTRecordLayout & 2971 ASTContext::getASTObjCImplementationLayout( 2972 const ObjCImplementationDecl *D) const { 2973 return getObjCLayout(D->getClassInterface(), D); 2974 } 2975 2976 static auto getCanonicalTemplateArguments(const ASTContext &C, 2977 ArrayRef<TemplateArgument> Args, 2978 bool &AnyNonCanonArgs) { 2979 SmallVector<TemplateArgument, 16> CanonArgs(Args); 2980 for (auto &Arg : CanonArgs) { 2981 TemplateArgument OrigArg = Arg; 2982 Arg = C.getCanonicalTemplateArgument(Arg); 2983 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); 2984 } 2985 return CanonArgs; 2986 } 2987 2988 //===----------------------------------------------------------------------===// 2989 // Type creation/memoization methods 2990 
//===----------------------------------------------------------------------===// 2991 2992 QualType 2993 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2994 unsigned fastQuals = quals.getFastQualifiers(); 2995 quals.removeFastQualifiers(); 2996 2997 // Check if we've already instantiated this type. 2998 llvm::FoldingSetNodeID ID; 2999 ExtQuals::Profile(ID, baseType, quals); 3000 void *insertPos = nullptr; 3001 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 3002 assert(eq->getQualifiers() == quals); 3003 return QualType(eq, fastQuals); 3004 } 3005 3006 // If the base type is not canonical, make the appropriate canonical type. 3007 QualType canon; 3008 if (!baseType->isCanonicalUnqualified()) { 3009 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 3010 canonSplit.Quals.addConsistentQualifiers(quals); 3011 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3012 3013 // Re-find the insert position. 3014 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3015 } 3016 3017 auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals); 3018 ExtQualNodes.InsertNode(eq, insertPos); 3019 return QualType(eq, fastQuals); 3020 } 3021 3022 QualType ASTContext::getAddrSpaceQualType(QualType T, 3023 LangAS AddressSpace) const { 3024 QualType CanT = getCanonicalType(T); 3025 if (CanT.getAddressSpace() == AddressSpace) 3026 return T; 3027 3028 // If we are composing extended qualifiers together, merge together 3029 // into one ExtQuals node. 3030 QualifierCollector Quals; 3031 const Type *TypeNode = Quals.strip(T); 3032 3033 // If this type already has an address space specified, it cannot get 3034 // another one. 3035 assert(!Quals.hasAddressSpace() && 3036 "Type cannot be in multiple addr spaces!"); 3037 Quals.addAddressSpace(AddressSpace); 3038 3039 return getExtQualType(TypeNode, Quals); 3040 } 3041 3042 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3043 // If the type is not qualified with an address space, just return it 3044 // immediately. 3045 if (!T.hasAddressSpace()) 3046 return T; 3047 3048 // If we are composing extended qualifiers together, merge together 3049 // into one ExtQuals node. 3050 QualifierCollector Quals; 3051 const Type *TypeNode; 3052 3053 while (T.hasAddressSpace()) { 3054 TypeNode = Quals.strip(T); 3055 3056 // If the type no longer has an address space after stripping qualifiers, 3057 // jump out. 3058 if (!QualType(TypeNode, 0).hasAddressSpace()) 3059 break; 3060 3061 // There might be sugar in the way. Strip it and try again. 3062 T = T.getSingleStepDesugaredType(*this); 3063 } 3064 3065 Quals.removeAddressSpace(); 3066 3067 // Removal of the address space can mean there are no longer any 3068 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3069 // or required. 
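  // e.g. (illustrative) stripping the address space from
  // '__attribute__((address_space(3))) const int' leaves only 'const', a fast
  // qualifier, so the plain QualType form in the else branch below suffices.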
3070 if (Quals.hasNonFastQualifiers()) 3071 return getExtQualType(TypeNode, Quals); 3072 else 3073 return QualType(TypeNode, Quals.getFastQualifiers()); 3074 } 3075 3076 QualType ASTContext::getObjCGCQualType(QualType T, 3077 Qualifiers::GC GCAttr) const { 3078 QualType CanT = getCanonicalType(T); 3079 if (CanT.getObjCGCAttr() == GCAttr) 3080 return T; 3081 3082 if (const auto *ptr = T->getAs<PointerType>()) { 3083 QualType Pointee = ptr->getPointeeType(); 3084 if (Pointee->isAnyPointerType()) { 3085 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3086 return getPointerType(ResultType); 3087 } 3088 } 3089 3090 // If we are composing extended qualifiers together, merge together 3091 // into one ExtQuals node. 3092 QualifierCollector Quals; 3093 const Type *TypeNode = Quals.strip(T); 3094 3095 // If this type already has an ObjCGC specified, it cannot get 3096 // another one. 3097 assert(!Quals.hasObjCGCAttr() && 3098 "Type cannot have multiple ObjCGCs!"); 3099 Quals.addObjCGCAttr(GCAttr); 3100 3101 return getExtQualType(TypeNode, Quals); 3102 } 3103 3104 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3105 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3106 QualType Pointee = Ptr->getPointeeType(); 3107 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3108 return getPointerType(removeAddrSpaceQualType(Pointee)); 3109 } 3110 } 3111 return T; 3112 } 3113 3114 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3115 FunctionType::ExtInfo Info) { 3116 if (T->getExtInfo() == Info) 3117 return T; 3118 3119 QualType Result; 3120 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3121 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3122 } else { 3123 const auto *FPT = cast<FunctionProtoType>(T); 3124 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3125 EPI.ExtInfo = Info; 3126 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3127 } 3128 3129 return cast<FunctionType>(Result.getTypePtr()); 3130 } 3131 3132 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3133 QualType ResultType) { 3134 FD = FD->getMostRecentDecl(); 3135 while (true) { 3136 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3137 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3138 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3139 if (FunctionDecl *Next = FD->getPreviousDecl()) 3140 FD = Next; 3141 else 3142 break; 3143 } 3144 if (ASTMutationListener *L = getASTMutationListener()) 3145 L->DeducedReturnType(FD, ResultType); 3146 } 3147 3148 /// Get a function type and produce the equivalent function type with the 3149 /// specified exception specification. Type sugar that can be present on a 3150 /// declaration of a function with an exception specification is permitted 3151 /// and preserved. Other type sugar (for instance, typedefs) is not. 3152 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3153 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3154 // Might have some parens. 3155 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3156 return getParenType( 3157 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3158 3159 // Might be wrapped in a macro qualified type. 
3160 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3161 return getMacroQualifiedType( 3162 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3163 MQT->getMacroIdentifier()); 3164 3165 // Might have a calling-convention attribute. 3166 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3167 return getAttributedType( 3168 AT->getAttrKind(), 3169 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3170 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3171 3172 // Anything else must be a function type. Rebuild it with the new exception 3173 // specification. 3174 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3175 return getFunctionType( 3176 Proto->getReturnType(), Proto->getParamTypes(), 3177 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3178 } 3179 3180 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3181 QualType U) const { 3182 return hasSameType(T, U) || 3183 (getLangOpts().CPlusPlus17 && 3184 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3185 getFunctionTypeWithExceptionSpec(U, EST_None))); 3186 } 3187 3188 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3189 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3190 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3191 SmallVector<QualType, 16> Args(Proto->param_types().size()); 3192 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3193 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]); 3194 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3195 } 3196 3197 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3198 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3199 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3200 } 3201 3202 return T; 3203 } 3204 3205 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3206 return hasSameType(T, U) || 3207 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3208 getFunctionTypeWithoutPtrSizes(U)); 3209 } 3210 3211 void ASTContext::adjustExceptionSpec( 3212 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3213 bool AsWritten) { 3214 // Update the type. 3215 QualType Updated = 3216 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3217 FD->setType(Updated); 3218 3219 if (!AsWritten) 3220 return; 3221 3222 // Update the type in the type source information too. 3223 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3224 // If the type and the type-as-written differ, we may need to update 3225 // the type-as-written too. 3226 if (TSInfo->getType() != FD->getType()) 3227 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3228 3229 // FIXME: When we get proper type location information for exceptions, 3230 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3231 // up the TypeSourceInfo; 3232 assert(TypeLoc::getFullDataSizeForType(Updated) == 3233 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3234 "TypeLoc size mismatch from updating exception specification"); 3235 TSInfo->overrideType(Updated); 3236 } 3237 } 3238 3239 /// getComplexType - Return the uniqued reference to the type for a complex 3240 /// number with the specified element type. 3241 QualType ASTContext::getComplexType(QualType T) const { 3242 // Unique pointers, to guarantee there is only one pointer of a particular 3243 // structure. 
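// The uniquing below hashes the significant fields into a FoldingSetNodeID
// and probes the per-kind FoldingSet, so, for example, two requests for
// `_Complex float` yield the very same ComplexType node and canonical
// types can be compared by pointer.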
3244 llvm::FoldingSetNodeID ID; 3245 ComplexType::Profile(ID, T); 3246 3247 void *InsertPos = nullptr; 3248 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3249 return QualType(CT, 0); 3250 3251 // If the pointee type isn't canonical, this won't be a canonical type either, 3252 // so fill in the canonical type field. 3253 QualType Canonical; 3254 if (!T.isCanonical()) { 3255 Canonical = getComplexType(getCanonicalType(T)); 3256 3257 // Get the new insert position for the node we care about. 3258 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3259 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3260 } 3261 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical); 3262 Types.push_back(New); 3263 ComplexTypes.InsertNode(New, InsertPos); 3264 return QualType(New, 0); 3265 } 3266 3267 /// getPointerType - Return the uniqued reference to the type for a pointer to 3268 /// the specified type. 3269 QualType ASTContext::getPointerType(QualType T) const { 3270 // Unique pointers, to guarantee there is only one pointer of a particular 3271 // structure. 3272 llvm::FoldingSetNodeID ID; 3273 PointerType::Profile(ID, T); 3274 3275 void *InsertPos = nullptr; 3276 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3277 return QualType(PT, 0); 3278 3279 // If the pointee type isn't canonical, this won't be a canonical type either, 3280 // so fill in the canonical type field. 3281 QualType Canonical; 3282 if (!T.isCanonical()) { 3283 Canonical = getPointerType(getCanonicalType(T)); 3284 3285 // Get the new insert position for the node we care about. 3286 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3287 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3288 } 3289 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical); 3290 Types.push_back(New); 3291 PointerTypes.InsertNode(New, InsertPos); 3292 return QualType(New, 0); 3293 } 3294 3295 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3296 llvm::FoldingSetNodeID ID; 3297 AdjustedType::Profile(ID, Orig, New); 3298 void *InsertPos = nullptr; 3299 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3300 if (AT) 3301 return QualType(AT, 0); 3302 3303 QualType Canonical = getCanonicalType(New); 3304 3305 // Get the new insert position for the node we care about. 3306 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3307 assert(!AT && "Shouldn't be in the map!"); 3308 3309 AT = new (*this, alignof(AdjustedType)) 3310 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3311 Types.push_back(AT); 3312 AdjustedTypes.InsertNode(AT, InsertPos); 3313 return QualType(AT, 0); 3314 } 3315 3316 QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const { 3317 llvm::FoldingSetNodeID ID; 3318 AdjustedType::Profile(ID, Orig, Decayed); 3319 void *InsertPos = nullptr; 3320 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3321 if (AT) 3322 return QualType(AT, 0); 3323 3324 QualType Canonical = getCanonicalType(Decayed); 3325 3326 // Get the new insert position for the node we care about. 
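// (Re-probing keeps InsertPos valid in case constructing the canonical
// type above touched this folding set; the assert below checks that the
// node is still absent.)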
3327 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3328 assert(!AT && "Shouldn't be in the map!"); 3329 3330 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical); 3331 Types.push_back(AT); 3332 AdjustedTypes.InsertNode(AT, InsertPos); 3333 return QualType(AT, 0); 3334 } 3335 3336 QualType ASTContext::getDecayedType(QualType T) const { 3337 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3338 3339 QualType Decayed; 3340 3341 // C99 6.7.5.3p7: 3342 // A declaration of a parameter as "array of type" shall be 3343 // adjusted to "qualified pointer to type", where the type 3344 // qualifiers (if any) are those specified within the [ and ] of 3345 // the array type derivation. 3346 if (T->isArrayType()) 3347 Decayed = getArrayDecayedType(T); 3348 3349 // C99 6.7.5.3p8: 3350 // A declaration of a parameter as "function returning type" 3351 // shall be adjusted to "pointer to function returning type", as 3352 // in 6.3.2.1. 3353 if (T->isFunctionType()) 3354 Decayed = getPointerType(T); 3355 3356 return getDecayedType(T, Decayed); 3357 } 3358 3359 /// getBlockPointerType - Return the uniqued reference to the type for 3360 /// a pointer to the specified block. 3361 QualType ASTContext::getBlockPointerType(QualType T) const { 3362 assert(T->isFunctionType() && "block of function types only"); 3363 // Unique pointers, to guarantee there is only one block of a particular 3364 // structure. 3365 llvm::FoldingSetNodeID ID; 3366 BlockPointerType::Profile(ID, T); 3367 3368 void *InsertPos = nullptr; 3369 if (BlockPointerType *PT = 3370 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3371 return QualType(PT, 0); 3372 3373 // If the block pointee type isn't canonical, this won't be a canonical 3374 // type either so fill in the canonical type field. 3375 QualType Canonical; 3376 if (!T.isCanonical()) { 3377 Canonical = getBlockPointerType(getCanonicalType(T)); 3378 3379 // Get the new insert position for the node we care about. 3380 BlockPointerType *NewIP = 3381 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3382 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3383 } 3384 auto *New = 3385 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical); 3386 Types.push_back(New); 3387 BlockPointerTypes.InsertNode(New, InsertPos); 3388 return QualType(New, 0); 3389 } 3390 3391 /// getLValueReferenceType - Return the uniqued reference to the type for an 3392 /// lvalue reference to the specified type. 3393 QualType 3394 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3395 assert((!T->isPlaceholderType() || 3396 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3397 "Unresolved placeholder type"); 3398 3399 // Unique pointers, to guarantee there is only one pointer of a particular 3400 // structure. 3401 llvm::FoldingSetNodeID ID; 3402 ReferenceType::Profile(ID, T, SpelledAsLValue); 3403 3404 void *InsertPos = nullptr; 3405 if (LValueReferenceType *RT = 3406 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3407 return QualType(RT, 0); 3408 3409 const auto *InnerRef = T->getAs<ReferenceType>(); 3410 3411 // If the referencee type isn't canonical, this won't be a canonical type 3412 // either, so fill in the canonical type field. 3413 QualType Canonical; 3414 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3415 QualType PointeeType = (InnerRef ? 
InnerRef->getPointeeType() : T); 3416 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3417 3418 // Get the new insert position for the node we care about. 3419 LValueReferenceType *NewIP = 3420 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3421 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3422 } 3423 3424 auto *New = new (*this, alignof(LValueReferenceType)) 3425 LValueReferenceType(T, Canonical, SpelledAsLValue); 3426 Types.push_back(New); 3427 LValueReferenceTypes.InsertNode(New, InsertPos); 3428 3429 return QualType(New, 0); 3430 } 3431 3432 /// getRValueReferenceType - Return the uniqued reference to the type for an 3433 /// rvalue reference to the specified type. 3434 QualType ASTContext::getRValueReferenceType(QualType T) const { 3435 assert((!T->isPlaceholderType() || 3436 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3437 "Unresolved placeholder type"); 3438 3439 // Unique pointers, to guarantee there is only one pointer of a particular 3440 // structure. 3441 llvm::FoldingSetNodeID ID; 3442 ReferenceType::Profile(ID, T, false); 3443 3444 void *InsertPos = nullptr; 3445 if (RValueReferenceType *RT = 3446 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3447 return QualType(RT, 0); 3448 3449 const auto *InnerRef = T->getAs<ReferenceType>(); 3450 3451 // If the referencee type isn't canonical, this won't be a canonical type 3452 // either, so fill in the canonical type field. 3453 QualType Canonical; 3454 if (InnerRef || !T.isCanonical()) { 3455 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3456 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3457 3458 // Get the new insert position for the node we care about. 3459 RValueReferenceType *NewIP = 3460 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3461 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3462 } 3463 3464 auto *New = new (*this, alignof(RValueReferenceType)) 3465 RValueReferenceType(T, Canonical); 3466 Types.push_back(New); 3467 RValueReferenceTypes.InsertNode(New, InsertPos); 3468 return QualType(New, 0); 3469 } 3470 3471 /// getMemberPointerType - Return the uniqued reference to the type for a 3472 /// member pointer to the specified type, in the specified class. 3473 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3474 // Unique pointers, to guarantee there is only one pointer of a particular 3475 // structure. 3476 llvm::FoldingSetNodeID ID; 3477 MemberPointerType::Profile(ID, T, Cls); 3478 3479 void *InsertPos = nullptr; 3480 if (MemberPointerType *PT = 3481 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3482 return QualType(PT, 0); 3483 3484 // If the pointee or class type isn't canonical, this won't be a canonical 3485 // type either, so fill in the canonical type field. 3486 QualType Canonical; 3487 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3488 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3489 3490 // Get the new insert position for the node we care about. 
3491 MemberPointerType *NewIP = 3492 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3493 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3494 } 3495 auto *New = new (*this, alignof(MemberPointerType)) 3496 MemberPointerType(T, Cls, Canonical); 3497 Types.push_back(New); 3498 MemberPointerTypes.InsertNode(New, InsertPos); 3499 return QualType(New, 0); 3500 } 3501 3502 /// getConstantArrayType - Return the unique reference to the type for an 3503 /// array of the specified element type. 3504 QualType ASTContext::getConstantArrayType(QualType EltTy, 3505 const llvm::APInt &ArySizeIn, 3506 const Expr *SizeExpr, 3507 ArraySizeModifier ASM, 3508 unsigned IndexTypeQuals) const { 3509 assert((EltTy->isDependentType() || 3510 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3511 "Constant array of VLAs is illegal!"); 3512 3513 // We only need the size as part of the type if it's instantiation-dependent. 3514 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3515 SizeExpr = nullptr; 3516 3517 // Convert the array size into a canonical width matching the pointer size for 3518 // the target. 3519 llvm::APInt ArySize(ArySizeIn); 3520 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3521 3522 llvm::FoldingSetNodeID ID; 3523 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3524 IndexTypeQuals); 3525 3526 void *InsertPos = nullptr; 3527 if (ConstantArrayType *ATP = 3528 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3529 return QualType(ATP, 0); 3530 3531 // If the element type isn't canonical or has qualifiers, or the array bound 3532 // is instantiation-dependent, this won't be a canonical type either, so fill 3533 // in the canonical type field. 3534 QualType Canon; 3535 // FIXME: Check below should look for qualifiers behind sugar. 3536 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3537 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3538 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3539 ASM, IndexTypeQuals); 3540 Canon = getQualifiedType(Canon, canonSplit.Quals); 3541 3542 // Get the new insert position for the node we care about. 3543 ConstantArrayType *NewIP = 3544 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3545 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3546 } 3547 3548 void *Mem = Allocate( 3549 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3550 alignof(ConstantArrayType)); 3551 auto *New = new (Mem) 3552 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3553 ConstantArrayTypes.InsertNode(New, InsertPos); 3554 Types.push_back(New); 3555 return QualType(New, 0); 3556 } 3557 3558 /// getVariableArrayDecayedType - Turns the given type, which may be 3559 /// variably-modified, into the corresponding type with all the known 3560 /// sizes replaced with [*]. 3561 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3562 // Vastly most common case. 3563 if (!type->isVariablyModifiedType()) return type; 3564 3565 QualType result; 3566 3567 SplitQualType split = type.getSplitDesugaredType(); 3568 const Type *ty = split.Ty; 3569 switch (ty->getTypeClass()) { 3570 #define TYPE(Class, Base) 3571 #define ABSTRACT_TYPE(Class, Base) 3572 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3573 #include "clang/AST/TypeNodes.inc" 3574 llvm_unreachable("didn't desugar past all non-canonical types?"); 3575 3576 // These types should never be variably-modified. 
3577 case Type::Builtin: 3578 case Type::Complex: 3579 case Type::Vector: 3580 case Type::DependentVector: 3581 case Type::ExtVector: 3582 case Type::DependentSizedExtVector: 3583 case Type::ConstantMatrix: 3584 case Type::DependentSizedMatrix: 3585 case Type::DependentAddressSpace: 3586 case Type::ObjCObject: 3587 case Type::ObjCInterface: 3588 case Type::ObjCObjectPointer: 3589 case Type::Record: 3590 case Type::Enum: 3591 case Type::UnresolvedUsing: 3592 case Type::TypeOfExpr: 3593 case Type::TypeOf: 3594 case Type::Decltype: 3595 case Type::UnaryTransform: 3596 case Type::DependentName: 3597 case Type::InjectedClassName: 3598 case Type::TemplateSpecialization: 3599 case Type::DependentTemplateSpecialization: 3600 case Type::TemplateTypeParm: 3601 case Type::SubstTemplateTypeParmPack: 3602 case Type::Auto: 3603 case Type::DeducedTemplateSpecialization: 3604 case Type::PackExpansion: 3605 case Type::BitInt: 3606 case Type::DependentBitInt: 3607 llvm_unreachable("type should never be variably-modified"); 3608 3609 // These types can be variably-modified but should never need to 3610 // further decay. 3611 case Type::FunctionNoProto: 3612 case Type::FunctionProto: 3613 case Type::BlockPointer: 3614 case Type::MemberPointer: 3615 case Type::Pipe: 3616 return type; 3617 3618 // These types can be variably-modified. All these modifications 3619 // preserve structure except as noted by comments. 3620 // TODO: if we ever care about optimizing VLAs, there are no-op 3621 // optimizations available here. 3622 case Type::Pointer: 3623 result = getPointerType(getVariableArrayDecayedType( 3624 cast<PointerType>(ty)->getPointeeType())); 3625 break; 3626 3627 case Type::LValueReference: { 3628 const auto *lv = cast<LValueReferenceType>(ty); 3629 result = getLValueReferenceType( 3630 getVariableArrayDecayedType(lv->getPointeeType()), 3631 lv->isSpelledAsLValue()); 3632 break; 3633 } 3634 3635 case Type::RValueReference: { 3636 const auto *lv = cast<RValueReferenceType>(ty); 3637 result = getRValueReferenceType( 3638 getVariableArrayDecayedType(lv->getPointeeType())); 3639 break; 3640 } 3641 3642 case Type::Atomic: { 3643 const auto *at = cast<AtomicType>(ty); 3644 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3645 break; 3646 } 3647 3648 case Type::ConstantArray: { 3649 const auto *cat = cast<ConstantArrayType>(ty); 3650 result = getConstantArrayType( 3651 getVariableArrayDecayedType(cat->getElementType()), 3652 cat->getSize(), 3653 cat->getSizeExpr(), 3654 cat->getSizeModifier(), 3655 cat->getIndexTypeCVRQualifiers()); 3656 break; 3657 } 3658 3659 case Type::DependentSizedArray: { 3660 const auto *dat = cast<DependentSizedArrayType>(ty); 3661 result = getDependentSizedArrayType( 3662 getVariableArrayDecayedType(dat->getElementType()), 3663 dat->getSizeExpr(), 3664 dat->getSizeModifier(), 3665 dat->getIndexTypeCVRQualifiers(), 3666 dat->getBracketsRange()); 3667 break; 3668 } 3669 3670 // Turn incomplete types into [*] types. 3671 case Type::IncompleteArray: { 3672 const auto *iat = cast<IncompleteArrayType>(ty); 3673 result = 3674 getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()), 3675 /*size*/ nullptr, ArraySizeModifier::Normal, 3676 iat->getIndexTypeCVRQualifiers(), SourceRange()); 3677 break; 3678 } 3679 3680 // Turn VLA types into [*] types. 
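// For example, a parameter type written as `int[n]` (a VLA with a known
// size expression) is rewritten here as `int[*]`: the size expression is
// dropped and the size modifier becomes Star, while the index-type
// qualifiers and the (recursively decayed) element type are preserved.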
3681 case Type::VariableArray: { 3682 const auto *vat = cast<VariableArrayType>(ty); 3683 result = getVariableArrayType( 3684 getVariableArrayDecayedType(vat->getElementType()), 3685 /*size*/ nullptr, ArraySizeModifier::Star, 3686 vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange()); 3687 break; 3688 } 3689 } 3690 3691 // Apply the top-level qualifiers from the original. 3692 return getQualifiedType(result, split.Quals); 3693 } 3694 3695 /// getVariableArrayType - Returns a non-unique reference to the type for a 3696 /// variable array of the specified element type. 3697 QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts, 3698 ArraySizeModifier ASM, 3699 unsigned IndexTypeQuals, 3700 SourceRange Brackets) const { 3701 // Since we don't unique expressions, it isn't possible to unique VLA's 3702 // that have an expression provided for their size. 3703 QualType Canon; 3704 3705 // Be sure to pull qualifiers off the element type. 3706 // FIXME: Check below should look for qualifiers behind sugar. 3707 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3708 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3709 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3710 IndexTypeQuals, Brackets); 3711 Canon = getQualifiedType(Canon, canonSplit.Quals); 3712 } 3713 3714 auto *New = new (*this, alignof(VariableArrayType)) 3715 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3716 3717 VariableArrayTypes.push_back(New); 3718 Types.push_back(New); 3719 return QualType(New, 0); 3720 } 3721 3722 /// getDependentSizedArrayType - Returns a non-unique reference to 3723 /// the type for a dependently-sized array of the specified element 3724 /// type. 3725 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3726 Expr *numElements, 3727 ArraySizeModifier ASM, 3728 unsigned elementTypeQuals, 3729 SourceRange brackets) const { 3730 assert((!numElements || numElements->isTypeDependent() || 3731 numElements->isValueDependent()) && 3732 "Size must be type- or value-dependent!"); 3733 3734 // Dependently-sized array types that do not have a specified number 3735 // of elements will have their sizes deduced from a dependent 3736 // initializer. We do no canonicalization here at all, which is okay 3737 // because they can't be used in most locations. 3738 if (!numElements) { 3739 auto *newType = new (*this, alignof(DependentSizedArrayType)) 3740 DependentSizedArrayType(elementType, QualType(), numElements, ASM, 3741 elementTypeQuals, brackets); 3742 Types.push_back(newType); 3743 return QualType(newType, 0); 3744 } 3745 3746 // Otherwise, we actually build a new type every time, but we 3747 // also build a canonical type. 3748 3749 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3750 3751 void *insertPos = nullptr; 3752 llvm::FoldingSetNodeID ID; 3753 DependentSizedArrayType::Profile(ID, *this, 3754 QualType(canonElementType.Ty, 0), 3755 ASM, elementTypeQuals, numElements); 3756 3757 // Look for an existing type with these properties. 3758 DependentSizedArrayType *canonTy = 3759 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3760 3761 // If we don't have one, build one. 
3762 if (!canonTy) { 3763 canonTy = new (*this, alignof(DependentSizedArrayType)) 3764 DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(), 3765 numElements, ASM, elementTypeQuals, brackets); 3766 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3767 Types.push_back(canonTy); 3768 } 3769 3770 // Apply qualifiers from the element type to the array. 3771 QualType canon = getQualifiedType(QualType(canonTy,0), 3772 canonElementType.Quals); 3773 3774 // If we didn't need extra canonicalization for the element type or the size 3775 // expression, then just use that as our result. 3776 if (QualType(canonElementType.Ty, 0) == elementType && 3777 canonTy->getSizeExpr() == numElements) 3778 return canon; 3779 3780 // Otherwise, we need to build a type which follows the spelling 3781 // of the element type. 3782 auto *sugaredType = new (*this, alignof(DependentSizedArrayType)) 3783 DependentSizedArrayType(elementType, canon, numElements, ASM, 3784 elementTypeQuals, brackets); 3785 Types.push_back(sugaredType); 3786 return QualType(sugaredType, 0); 3787 } 3788 3789 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3790 ArraySizeModifier ASM, 3791 unsigned elementTypeQuals) const { 3792 llvm::FoldingSetNodeID ID; 3793 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3794 3795 void *insertPos = nullptr; 3796 if (IncompleteArrayType *iat = 3797 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3798 return QualType(iat, 0); 3799 3800 // If the element type isn't canonical, this won't be a canonical type 3801 // either, so fill in the canonical type field. We also have to pull 3802 // qualifiers off the element type. 3803 QualType canon; 3804 3805 // FIXME: Check below should look for qualifiers behind sugar. 3806 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3807 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3808 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3809 ASM, elementTypeQuals); 3810 canon = getQualifiedType(canon, canonSplit.Quals); 3811 3812 // Get the new insert position for the node we care about. 
3813 IncompleteArrayType *existing = 3814 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3815 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3816 } 3817 3818 auto *newType = new (*this, alignof(IncompleteArrayType)) 3819 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3820 3821 IncompleteArrayTypes.InsertNode(newType, insertPos); 3822 Types.push_back(newType); 3823 return QualType(newType, 0); 3824 } 3825 3826 ASTContext::BuiltinVectorTypeInfo 3827 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3828 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3829 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3830 NUMVECTORS}; 3831 3832 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3833 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3834 3835 switch (Ty->getKind()) { 3836 default: 3837 llvm_unreachable("Unsupported builtin vector type"); 3838 case BuiltinType::SveInt8: 3839 return SVE_INT_ELTTY(8, 16, true, 1); 3840 case BuiltinType::SveUint8: 3841 return SVE_INT_ELTTY(8, 16, false, 1); 3842 case BuiltinType::SveInt8x2: 3843 return SVE_INT_ELTTY(8, 16, true, 2); 3844 case BuiltinType::SveUint8x2: 3845 return SVE_INT_ELTTY(8, 16, false, 2); 3846 case BuiltinType::SveInt8x3: 3847 return SVE_INT_ELTTY(8, 16, true, 3); 3848 case BuiltinType::SveUint8x3: 3849 return SVE_INT_ELTTY(8, 16, false, 3); 3850 case BuiltinType::SveInt8x4: 3851 return SVE_INT_ELTTY(8, 16, true, 4); 3852 case BuiltinType::SveUint8x4: 3853 return SVE_INT_ELTTY(8, 16, false, 4); 3854 case BuiltinType::SveInt16: 3855 return SVE_INT_ELTTY(16, 8, true, 1); 3856 case BuiltinType::SveUint16: 3857 return SVE_INT_ELTTY(16, 8, false, 1); 3858 case BuiltinType::SveInt16x2: 3859 return SVE_INT_ELTTY(16, 8, true, 2); 3860 case BuiltinType::SveUint16x2: 3861 return SVE_INT_ELTTY(16, 8, false, 2); 3862 case BuiltinType::SveInt16x3: 3863 return SVE_INT_ELTTY(16, 8, true, 3); 3864 case BuiltinType::SveUint16x3: 3865 return SVE_INT_ELTTY(16, 8, false, 3); 3866 case BuiltinType::SveInt16x4: 3867 return SVE_INT_ELTTY(16, 8, true, 4); 3868 case BuiltinType::SveUint16x4: 3869 return SVE_INT_ELTTY(16, 8, false, 4); 3870 case BuiltinType::SveInt32: 3871 return SVE_INT_ELTTY(32, 4, true, 1); 3872 case BuiltinType::SveUint32: 3873 return SVE_INT_ELTTY(32, 4, false, 1); 3874 case BuiltinType::SveInt32x2: 3875 return SVE_INT_ELTTY(32, 4, true, 2); 3876 case BuiltinType::SveUint32x2: 3877 return SVE_INT_ELTTY(32, 4, false, 2); 3878 case BuiltinType::SveInt32x3: 3879 return SVE_INT_ELTTY(32, 4, true, 3); 3880 case BuiltinType::SveUint32x3: 3881 return SVE_INT_ELTTY(32, 4, false, 3); 3882 case BuiltinType::SveInt32x4: 3883 return SVE_INT_ELTTY(32, 4, true, 4); 3884 case BuiltinType::SveUint32x4: 3885 return SVE_INT_ELTTY(32, 4, false, 4); 3886 case BuiltinType::SveInt64: 3887 return SVE_INT_ELTTY(64, 2, true, 1); 3888 case BuiltinType::SveUint64: 3889 return SVE_INT_ELTTY(64, 2, false, 1); 3890 case BuiltinType::SveInt64x2: 3891 return SVE_INT_ELTTY(64, 2, true, 2); 3892 case BuiltinType::SveUint64x2: 3893 return SVE_INT_ELTTY(64, 2, false, 2); 3894 case BuiltinType::SveInt64x3: 3895 return SVE_INT_ELTTY(64, 2, true, 3); 3896 case BuiltinType::SveUint64x3: 3897 return SVE_INT_ELTTY(64, 2, false, 3); 3898 case BuiltinType::SveInt64x4: 3899 return SVE_INT_ELTTY(64, 2, true, 4); 3900 case BuiltinType::SveUint64x4: 3901 return SVE_INT_ELTTY(64, 2, false, 4); 3902 case BuiltinType::SveBool: 3903 return SVE_ELTTY(BoolTy, 16, 1); 3904 case 
BuiltinType::SveBoolx2: 3905 return SVE_ELTTY(BoolTy, 16, 2); 3906 case BuiltinType::SveBoolx4: 3907 return SVE_ELTTY(BoolTy, 16, 4); 3908 case BuiltinType::SveFloat16: 3909 return SVE_ELTTY(HalfTy, 8, 1); 3910 case BuiltinType::SveFloat16x2: 3911 return SVE_ELTTY(HalfTy, 8, 2); 3912 case BuiltinType::SveFloat16x3: 3913 return SVE_ELTTY(HalfTy, 8, 3); 3914 case BuiltinType::SveFloat16x4: 3915 return SVE_ELTTY(HalfTy, 8, 4); 3916 case BuiltinType::SveFloat32: 3917 return SVE_ELTTY(FloatTy, 4, 1); 3918 case BuiltinType::SveFloat32x2: 3919 return SVE_ELTTY(FloatTy, 4, 2); 3920 case BuiltinType::SveFloat32x3: 3921 return SVE_ELTTY(FloatTy, 4, 3); 3922 case BuiltinType::SveFloat32x4: 3923 return SVE_ELTTY(FloatTy, 4, 4); 3924 case BuiltinType::SveFloat64: 3925 return SVE_ELTTY(DoubleTy, 2, 1); 3926 case BuiltinType::SveFloat64x2: 3927 return SVE_ELTTY(DoubleTy, 2, 2); 3928 case BuiltinType::SveFloat64x3: 3929 return SVE_ELTTY(DoubleTy, 2, 3); 3930 case BuiltinType::SveFloat64x4: 3931 return SVE_ELTTY(DoubleTy, 2, 4); 3932 case BuiltinType::SveBFloat16: 3933 return SVE_ELTTY(BFloat16Ty, 8, 1); 3934 case BuiltinType::SveBFloat16x2: 3935 return SVE_ELTTY(BFloat16Ty, 8, 2); 3936 case BuiltinType::SveBFloat16x3: 3937 return SVE_ELTTY(BFloat16Ty, 8, 3); 3938 case BuiltinType::SveBFloat16x4: 3939 return SVE_ELTTY(BFloat16Ty, 8, 4); 3940 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3941 IsSigned) \ 3942 case BuiltinType::Id: \ 3943 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3944 llvm::ElementCount::getScalable(NumEls), NF}; 3945 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3946 case BuiltinType::Id: \ 3947 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3948 llvm::ElementCount::getScalable(NumEls), NF}; 3949 #define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3950 case BuiltinType::Id: \ 3951 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF}; 3952 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3953 case BuiltinType::Id: \ 3954 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3955 #include "clang/Basic/RISCVVTypes.def" 3956 } 3957 } 3958 3959 /// getExternrefType - Return a WebAssembly externref type, which represents an 3960 /// opaque reference to a host value. 3961 QualType ASTContext::getWebAssemblyExternrefType() const { 3962 if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) { 3963 #define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \ 3964 if (BuiltinType::Id == BuiltinType::WasmExternRef) \ 3965 return SingletonId; 3966 #include "clang/Basic/WebAssemblyReferenceTypes.def" 3967 } 3968 llvm_unreachable( 3969 "shouldn't try to generate type externref outside WebAssembly target"); 3970 } 3971 3972 /// getScalableVectorType - Return the unique reference to a scalable vector 3973 /// type of the specified element type and size. VectorType must be a built-in 3974 /// type. 
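///
/// For example (a sketch, assuming an ASTContext `Ctx` for an AArch64
/// target with the SVE types available):
/// \code
///   QualType SV = Ctx.getScalableVectorType(Ctx.FloatTy, 4, 1); // svfloat32_t
///   QualType PR = Ctx.getScalableVectorType(Ctx.BoolTy, 16, 1); // svbool_t
/// \endcode
/// On targets with neither SVE nor RISC-V vector types this returns a null
/// QualType.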
3975 QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, 3976 unsigned NumFields) const { 3977 if (Target->hasAArch64SVETypes()) { 3978 uint64_t EltTySize = getTypeSize(EltTy); 3979 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3980 IsSigned, IsFP, IsBF) \ 3981 if (!EltTy->isBooleanType() && \ 3982 ((EltTy->hasIntegerRepresentation() && \ 3983 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3984 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3985 IsFP && !IsBF) || \ 3986 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3987 IsBF && !IsFP)) && \ 3988 EltTySize == ElBits && NumElts == NumEls) { \ 3989 return SingletonId; \ 3990 } 3991 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3992 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3993 return SingletonId; 3994 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId) 3995 #include "clang/Basic/AArch64SVEACLETypes.def" 3996 } else if (Target->hasRISCVVTypes()) { 3997 uint64_t EltTySize = getTypeSize(EltTy); 3998 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3999 IsFP, IsBF) \ 4000 if (!EltTy->isBooleanType() && \ 4001 ((EltTy->hasIntegerRepresentation() && \ 4002 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 4003 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 4004 IsFP && !IsBF) || \ 4005 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 4006 IsBF && !IsFP)) && \ 4007 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \ 4008 return SingletonId; 4009 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 4010 if (EltTy->isBooleanType() && NumElts == NumEls) \ 4011 return SingletonId; 4012 #include "clang/Basic/RISCVVTypes.def" 4013 } 4014 return QualType(); 4015 } 4016 4017 /// getVectorType - Return the unique reference to a vector type of 4018 /// the specified element type and size. VectorType must be a built-in type. 4019 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4020 VectorKind VecKind) const { 4021 assert(vecType->isBuiltinType() || 4022 (vecType->isBitIntType() && 4023 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4024 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4025 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4026 4027 // Check if we've already instantiated a vector of this type. 4028 llvm::FoldingSetNodeID ID; 4029 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4030 4031 void *InsertPos = nullptr; 4032 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4033 return QualType(VTP, 0); 4034 4035 // If the element type isn't canonical, this won't be a canonical type either, 4036 // so fill in the canonical type field. 4037 QualType Canonical; 4038 if (!vecType.isCanonical()) { 4039 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4040 4041 // Get the new insert position for the node we care about. 
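// (The recursive getVectorType call above inserts the canonical vector
// type into this same folding set, which invalidates the previously
// computed InsertPos, hence the fresh lookup below.)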
4042 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4043 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4044 } 4045 auto *New = new (*this, alignof(VectorType)) 4046 VectorType(vecType, NumElts, Canonical, VecKind); 4047 VectorTypes.InsertNode(New, InsertPos); 4048 Types.push_back(New); 4049 return QualType(New, 0); 4050 } 4051 4052 QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4053 SourceLocation AttrLoc, 4054 VectorKind VecKind) const { 4055 llvm::FoldingSetNodeID ID; 4056 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4057 VecKind); 4058 void *InsertPos = nullptr; 4059 DependentVectorType *Canon = 4060 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4061 DependentVectorType *New; 4062 4063 if (Canon) { 4064 New = new (*this, alignof(DependentVectorType)) DependentVectorType( 4065 VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4066 } else { 4067 QualType CanonVecTy = getCanonicalType(VecType); 4068 if (CanonVecTy == VecType) { 4069 New = new (*this, alignof(DependentVectorType)) 4070 DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4071 4072 DependentVectorType *CanonCheck = 4073 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4074 assert(!CanonCheck && 4075 "Dependent-sized vector_size canonical type broken"); 4076 (void)CanonCheck; 4077 DependentVectorTypes.InsertNode(New, InsertPos); 4078 } else { 4079 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4080 SourceLocation(), VecKind); 4081 New = new (*this, alignof(DependentVectorType)) 4082 DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4083 } 4084 } 4085 4086 Types.push_back(New); 4087 return QualType(New, 0); 4088 } 4089 4090 /// getExtVectorType - Return the unique reference to an extended vector type of 4091 /// the specified element type and size. VectorType must be a built-in type. 4092 QualType ASTContext::getExtVectorType(QualType vecType, 4093 unsigned NumElts) const { 4094 assert(vecType->isBuiltinType() || vecType->isDependentType() || 4095 (vecType->isBitIntType() && 4096 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4097 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4098 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4099 4100 // Check if we've already instantiated a vector of this type. 4101 llvm::FoldingSetNodeID ID; 4102 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4103 VectorKind::Generic); 4104 void *InsertPos = nullptr; 4105 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4106 return QualType(VTP, 0); 4107 4108 // If the element type isn't canonical, this won't be a canonical type either, 4109 // so fill in the canonical type field. 4110 QualType Canonical; 4111 if (!vecType.isCanonical()) { 4112 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4113 4114 // Get the new insert position for the node we care about. 
4115 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4116 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4117 } 4118 auto *New = new (*this, alignof(ExtVectorType)) 4119 ExtVectorType(vecType, NumElts, Canonical); 4120 VectorTypes.InsertNode(New, InsertPos); 4121 Types.push_back(New); 4122 return QualType(New, 0); 4123 } 4124 4125 QualType 4126 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4127 Expr *SizeExpr, 4128 SourceLocation AttrLoc) const { 4129 llvm::FoldingSetNodeID ID; 4130 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4131 SizeExpr); 4132 4133 void *InsertPos = nullptr; 4134 DependentSizedExtVectorType *Canon 4135 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4136 DependentSizedExtVectorType *New; 4137 if (Canon) { 4138 // We already have a canonical version of this array type; use it as 4139 // the canonical type for a newly-built type. 4140 New = new (*this, alignof(DependentSizedExtVectorType)) 4141 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr, 4142 AttrLoc); 4143 } else { 4144 QualType CanonVecTy = getCanonicalType(vecType); 4145 if (CanonVecTy == vecType) { 4146 New = new (*this, alignof(DependentSizedExtVectorType)) 4147 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc); 4148 4149 DependentSizedExtVectorType *CanonCheck 4150 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4151 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4152 (void)CanonCheck; 4153 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4154 } else { 4155 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4156 SourceLocation()); 4157 New = new (*this, alignof(DependentSizedExtVectorType)) 4158 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc); 4159 } 4160 } 4161 4162 Types.push_back(New); 4163 return QualType(New, 0); 4164 } 4165 4166 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4167 unsigned NumColumns) const { 4168 llvm::FoldingSetNodeID ID; 4169 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4170 Type::ConstantMatrix); 4171 4172 assert(MatrixType::isValidElementType(ElementTy) && 4173 "need a valid element type"); 4174 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4175 ConstantMatrixType::isDimensionValid(NumColumns) && 4176 "need valid matrix dimensions"); 4177 void *InsertPos = nullptr; 4178 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4179 return QualType(MTP, 0); 4180 4181 QualType Canonical; 4182 if (!ElementTy.isCanonical()) { 4183 Canonical = 4184 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4185 4186 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4187 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4188 (void)NewIP; 4189 } 4190 4191 auto *New = new (*this, alignof(ConstantMatrixType)) 4192 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4193 MatrixTypes.InsertNode(New, InsertPos); 4194 Types.push_back(New); 4195 return QualType(New, 0); 4196 } 4197 4198 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4199 Expr *RowExpr, 4200 Expr *ColumnExpr, 4201 SourceLocation AttrLoc) const { 4202 QualType CanonElementTy = getCanonicalType(ElementTy); 4203 llvm::FoldingSetNodeID ID; 4204 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4205 ColumnExpr); 4206 
4207 void *InsertPos = nullptr; 4208 DependentSizedMatrixType *Canon = 4209 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4210 4211 if (!Canon) { 4212 Canon = new (*this, alignof(DependentSizedMatrixType)) 4213 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr, 4214 ColumnExpr, AttrLoc); 4215 #ifndef NDEBUG 4216 DependentSizedMatrixType *CanonCheck = 4217 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4218 assert(!CanonCheck && "Dependent-sized matrix canonical type broken"); 4219 #endif 4220 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos); 4221 Types.push_back(Canon); 4222 } 4223 4224 // We now have a canonical version of the matrix type. 4225 // 4226 // If it exactly matches the requested type, use it directly. 4227 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr && 4228 Canon->getColumnExpr() == ColumnExpr) 4229 return QualType(Canon, 0); 4230 4231 // Otherwise, use Canon as the canonical type for the newly-built sugar node. 4232 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType)) 4233 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr, 4234 ColumnExpr, AttrLoc); 4235 Types.push_back(New); 4236 return QualType(New, 0); 4237 } 4238 4239 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, 4240 Expr *AddrSpaceExpr, 4241 SourceLocation AttrLoc) const { 4242 assert(AddrSpaceExpr->isInstantiationDependent()); 4243 4244 QualType canonPointeeType = getCanonicalType(PointeeType); 4245 4246 void *insertPos = nullptr; 4247 llvm::FoldingSetNodeID ID; 4248 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, 4249 AddrSpaceExpr); 4250 4251 DependentAddressSpaceType *canonTy = 4252 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); 4253 4254 if (!canonTy) { 4255 canonTy = new (*this, alignof(DependentAddressSpaceType)) 4256 DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr, 4257 AttrLoc); 4258 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); 4259 Types.push_back(canonTy); 4260 } 4261 4262 if (canonPointeeType == PointeeType && 4263 canonTy->getAddrSpaceExpr() == AddrSpaceExpr) 4264 return QualType(canonTy, 0); 4265 4266 auto *sugaredType = new (*this, alignof(DependentAddressSpaceType)) 4267 DependentAddressSpaceType(PointeeType, QualType(canonTy, 0), 4268 AddrSpaceExpr, AttrLoc); 4269 Types.push_back(sugaredType); 4270 return QualType(sugaredType, 0); 4271 } 4272 4273 /// Determine whether \p T is canonical as the result type of a function. 4274 static bool isCanonicalResultType(QualType T) { 4275 return T.isCanonical() && 4276 (T.getObjCLifetime() == Qualifiers::OCL_None || 4277 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); 4278 } 4279 4280 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. 4281 QualType 4282 ASTContext::getFunctionNoProtoType(QualType ResultTy, 4283 const FunctionType::ExtInfo &Info) const { 4284 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter 4285 // functionality creates a function without a prototype regardless of 4286 // language mode (so it makes them even in C++). Once the rewriter has been 4287 // fixed, this assertion can be enabled again. 4288 //assert(!LangOpts.requiresStrictPrototypes() && 4289 // "strict prototypes are disabled"); 4290 4291 // Unique functions, to guarantee there is only one function of a particular 4292 // structure.
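// For example, in C89 the declaration `int f();` gives `f` a
// FunctionNoProtoType (`int ()`): only the result type and ExtInfo are
// tracked, with no parameter information.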
4293 llvm::FoldingSetNodeID ID; 4294 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4295 4296 void *InsertPos = nullptr; 4297 if (FunctionNoProtoType *FT = 4298 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4299 return QualType(FT, 0); 4300 4301 QualType Canonical; 4302 if (!isCanonicalResultType(ResultTy)) { 4303 Canonical = 4304 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4305 4306 // Get the new insert position for the node we care about. 4307 FunctionNoProtoType *NewIP = 4308 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4309 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4310 } 4311 4312 auto *New = new (*this, alignof(FunctionNoProtoType)) 4313 FunctionNoProtoType(ResultTy, Canonical, Info); 4314 Types.push_back(New); 4315 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4316 return QualType(New, 0); 4317 } 4318 4319 CanQualType 4320 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4321 CanQualType CanResultType = getCanonicalType(ResultType); 4322 4323 // Canonical result types do not have ARC lifetime qualifiers. 4324 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4325 Qualifiers Qs = CanResultType.getQualifiers(); 4326 Qs.removeObjCLifetime(); 4327 return CanQualType::CreateUnsafe( 4328 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4329 } 4330 4331 return CanResultType; 4332 } 4333 4334 static bool isCanonicalExceptionSpecification( 4335 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4336 if (ESI.Type == EST_None) 4337 return true; 4338 if (!NoexceptInType) 4339 return false; 4340 4341 // C++17 onwards: exception specification is part of the type, as a simple 4342 // boolean "can this function type throw". 4343 if (ESI.Type == EST_BasicNoexcept) 4344 return true; 4345 4346 // A noexcept(expr) specification is (possibly) canonical if expr is 4347 // value-dependent. 4348 if (ESI.Type == EST_DependentNoexcept) 4349 return true; 4350 4351 // A dynamic exception specification is canonical if it only contains pack 4352 // expansions (so we can't tell whether it's non-throwing) and all its 4353 // contained types are canonical. 4354 if (ESI.Type == EST_Dynamic) { 4355 bool AnyPackExpansions = false; 4356 for (QualType ET : ESI.Exceptions) { 4357 if (!ET.isCanonical()) 4358 return false; 4359 if (ET->getAs<PackExpansionType>()) 4360 AnyPackExpansions = true; 4361 } 4362 return AnyPackExpansions; 4363 } 4364 4365 return false; 4366 } 4367 4368 QualType ASTContext::getFunctionTypeInternal( 4369 QualType ResultTy, ArrayRef<QualType> ArgArray, 4370 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4371 size_t NumArgs = ArgArray.size(); 4372 4373 // Unique functions, to guarantee there is only one function of a particular 4374 // structure. 4375 llvm::FoldingSetNodeID ID; 4376 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4377 *this, true); 4378 4379 QualType Canonical; 4380 bool Unique = false; 4381 4382 void *InsertPos = nullptr; 4383 if (FunctionProtoType *FPT = 4384 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4385 QualType Existing = QualType(FPT, 0); 4386 4387 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4388 // it so long as our exception specification doesn't contain a dependent 4389 // noexcept expression, or we're just looking for a canonical type. 4390 // Otherwise, we're going to need to create a type 4391 // sugar node to hold the concrete expression. 
4392 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4393 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4394 return Existing; 4395 4396 // We need a new type sugar node for this one, to hold the new noexcept 4397 // expression. We do no canonicalization here, but that's OK since we don't 4398 // expect to see the same noexcept expression much more than once. 4399 Canonical = getCanonicalType(Existing); 4400 Unique = true; 4401 } 4402 4403 bool NoexceptInType = getLangOpts().CPlusPlus17; 4404 bool IsCanonicalExceptionSpec = 4405 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4406 4407 // Determine whether the type being created is already canonical or not. 4408 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4409 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4410 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4411 if (!ArgArray[i].isCanonicalAsParam()) 4412 isCanonical = false; 4413 4414 if (OnlyWantCanonical) 4415 assert(isCanonical && 4416 "given non-canonical parameters constructing canonical type"); 4417 4418 // If this type isn't canonical, get the canonical version of it if we don't 4419 // already have it. The exception spec is only partially part of the 4420 // canonical type, and only in C++17 onwards. 4421 if (!isCanonical && Canonical.isNull()) { 4422 SmallVector<QualType, 16> CanonicalArgs; 4423 CanonicalArgs.reserve(NumArgs); 4424 for (unsigned i = 0; i != NumArgs; ++i) 4425 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4426 4427 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4428 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4429 CanonicalEPI.HasTrailingReturn = false; 4430 4431 if (IsCanonicalExceptionSpec) { 4432 // Exception spec is already OK. 4433 } else if (NoexceptInType) { 4434 switch (EPI.ExceptionSpec.Type) { 4435 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4436 // We don't know yet. It shouldn't matter what we pick here; no-one 4437 // should ever look at this. 4438 [[fallthrough]]; 4439 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4440 CanonicalEPI.ExceptionSpec.Type = EST_None; 4441 break; 4442 4443 // A dynamic exception specification is almost always "not noexcept", 4444 // with the exception that a pack expansion might expand to no types. 4445 case EST_Dynamic: { 4446 bool AnyPacks = false; 4447 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4448 if (ET->getAs<PackExpansionType>()) 4449 AnyPacks = true; 4450 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4451 } 4452 if (!AnyPacks) 4453 CanonicalEPI.ExceptionSpec.Type = EST_None; 4454 else { 4455 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4456 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4457 } 4458 break; 4459 } 4460 4461 case EST_DynamicNone: 4462 case EST_BasicNoexcept: 4463 case EST_NoexceptTrue: 4464 case EST_NoThrow: 4465 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4466 break; 4467 4468 case EST_DependentNoexcept: 4469 llvm_unreachable("dependent noexcept is already canonical"); 4470 } 4471 } else { 4472 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4473 } 4474 4475 // Adjust the canonical function result type. 4476 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4477 Canonical = 4478 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4479 4480 // Get the new insert position for the node we care about. 
4481 FunctionProtoType *NewIP = 4482 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4483 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4484 } 4485 4486 // Compute the needed size to hold this FunctionProtoType and the 4487 // various trailing objects. 4488 auto ESH = FunctionProtoType::getExceptionSpecSize( 4489 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4490 size_t Size = FunctionProtoType::totalSizeToAlloc< 4491 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4492 FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType, 4493 Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers>( 4494 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4495 EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType, 4496 ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4497 EPI.ExtParameterInfos ? NumArgs : 0, 4498 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4499 4500 auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType)); 4501 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4502 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4503 Types.push_back(FTP); 4504 if (!Unique) 4505 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4506 return QualType(FTP, 0); 4507 } 4508 4509 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4510 llvm::FoldingSetNodeID ID; 4511 PipeType::Profile(ID, T, ReadOnly); 4512 4513 void *InsertPos = nullptr; 4514 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4515 return QualType(PT, 0); 4516 4517 // If the pipe element type isn't canonical, this won't be a canonical type 4518 // either, so fill in the canonical type field. 4519 QualType Canonical; 4520 if (!T.isCanonical()) { 4521 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4522 4523 // Get the new insert position for the node we care about. 4524 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4525 assert(!NewIP && "Shouldn't be in the map!"); 4526 (void)NewIP; 4527 } 4528 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly); 4529 Types.push_back(New); 4530 PipeTypes.InsertNode(New, InsertPos); 4531 return QualType(New, 0); 4532 } 4533 4534 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4535 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4536 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4537 : Ty; 4538 } 4539 4540 QualType ASTContext::getReadPipeType(QualType T) const { 4541 return getPipeType(T, true); 4542 } 4543 4544 QualType ASTContext::getWritePipeType(QualType T) const { 4545 return getPipeType(T, false); 4546 } 4547 4548 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4549 llvm::FoldingSetNodeID ID; 4550 BitIntType::Profile(ID, IsUnsigned, NumBits); 4551 4552 void *InsertPos = nullptr; 4553 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4554 return QualType(EIT, 0); 4555 4556 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits); 4557 BitIntTypes.InsertNode(New, InsertPos); 4558 Types.push_back(New); 4559 return QualType(New, 0); 4560 } 4561 4562 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4563 Expr *NumBitsExpr) const { 4564 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4565 llvm::FoldingSetNodeID ID; 4566 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4567 4568 void *InsertPos = nullptr; 4569 if (DependentBitIntType *Existing = 4570 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4571 return QualType(Existing, 0); 4572 4573 auto *New = new (*this, alignof(DependentBitIntType)) 4574 DependentBitIntType(IsUnsigned, NumBitsExpr); 4575 DependentBitIntTypes.InsertNode(New, InsertPos); 4576 4577 Types.push_back(New); 4578 return QualType(New, 0); 4579 } 4580 4581 #ifndef NDEBUG 4582 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4583 if (!isa<CXXRecordDecl>(D)) return false; 4584 const auto *RD = cast<CXXRecordDecl>(D); 4585 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4586 return true; 4587 if (RD->getDescribedClassTemplate() && 4588 !isa<ClassTemplateSpecializationDecl>(RD)) 4589 return true; 4590 return false; 4591 } 4592 #endif 4593 4594 /// getInjectedClassNameType - Return the unique reference to the 4595 /// injected class name type for the specified templated declaration. 4596 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4597 QualType TST) const { 4598 assert(NeedsInjectedClassNameType(Decl)); 4599 if (Decl->TypeForDecl) { 4600 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4601 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4602 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4603 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4604 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4605 } else { 4606 Type *newType = new (*this, alignof(InjectedClassNameType)) 4607 InjectedClassNameType(Decl, TST); 4608 Decl->TypeForDecl = newType; 4609 Types.push_back(newType); 4610 } 4611 return QualType(Decl->TypeForDecl, 0); 4612 } 4613 4614 /// getTypeDeclType - Return the unique reference to the type for the 4615 /// specified type declaration. 
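///
/// This is the out-of-line slow path, used when the declaration does not yet
/// have a cached TypeForDecl. For example (a sketch, assuming an ASTContext
/// `Ctx` and a `RecordDecl *RD`):
/// \code
///   QualType T1 = Ctx.getTypeDeclType(RD);
///   QualType T2 = Ctx.getRecordType(RD);
///   assert(T1 == T2 && "both name the unique RecordType node");
/// \endcode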
4616 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4617 assert(Decl && "Passed null for Decl param"); 4618 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4619 4620 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4621 return getTypedefType(Typedef); 4622 4623 assert(!isa<TemplateTypeParmDecl>(Decl) && 4624 "Template type parameter types are always available."); 4625 4626 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4627 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4628 assert(!NeedsInjectedClassNameType(Record)); 4629 return getRecordType(Record); 4630 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4631 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4632 return getEnumType(Enum); 4633 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4634 return getUnresolvedUsingType(Using); 4635 } else 4636 llvm_unreachable("TypeDecl without a type?"); 4637 4638 return QualType(Decl->TypeForDecl, 0); 4639 } 4640 4641 /// getTypedefType - Return the unique reference to the type for the 4642 /// specified typedef name decl. 4643 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4644 QualType Underlying) const { 4645 if (!Decl->TypeForDecl) { 4646 if (Underlying.isNull()) 4647 Underlying = Decl->getUnderlyingType(); 4648 auto *NewType = new (*this, alignof(TypedefType)) TypedefType( 4649 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying)); 4650 Decl->TypeForDecl = NewType; 4651 Types.push_back(NewType); 4652 return QualType(NewType, 0); 4653 } 4654 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying) 4655 return QualType(Decl->TypeForDecl, 0); 4656 assert(hasSameType(Decl->getUnderlyingType(), Underlying)); 4657 4658 llvm::FoldingSetNodeID ID; 4659 TypedefType::Profile(ID, Decl, Underlying); 4660 4661 void *InsertPos = nullptr; 4662 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4663 assert(!T->typeMatchesDecl() && 4664 "non-divergent case should be handled with TypeDecl"); 4665 return QualType(T, 0); 4666 } 4667 4668 void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true), 4669 alignof(TypedefType)); 4670 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying, 4671 getCanonicalType(Underlying)); 4672 TypedefTypes.InsertNode(NewType, InsertPos); 4673 Types.push_back(NewType); 4674 return QualType(NewType, 0); 4675 } 4676 4677 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4678 QualType Underlying) const { 4679 llvm::FoldingSetNodeID ID; 4680 UsingType::Profile(ID, Found, Underlying); 4681 4682 void *InsertPos = nullptr; 4683 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos)) 4684 return QualType(T, 0); 4685 4686 const Type *TypeForDecl = 4687 cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(); 4688 4689 assert(!Underlying.hasLocalQualifiers()); 4690 QualType Canon = Underlying->getCanonicalTypeInternal(); 4691 assert(TypeForDecl->getCanonicalTypeInternal() == Canon); 4692 4693 if (Underlying.getTypePtr() == TypeForDecl) 4694 Underlying = QualType(); 4695 void *Mem = 4696 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()), 4697 alignof(UsingType)); 4698 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon); 4699 Types.push_back(NewType); 4700 UsingTypes.InsertNode(NewType, InsertPos); 4701 return QualType(NewType, 0); 4702 } 4703 4704 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 
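  // The RecordType node is created once and then shared across the whole
  // redeclaration chain via TypeForDecl. Illustrative example (made-up names):
  //   struct S;              // first declaration allocates the RecordType
  //   struct S { int x; };   // redeclaration reuses it via getPreviousDecl()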
4705 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4706 4707 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4708 if (PrevDecl->TypeForDecl) 4709 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4710 4711 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl); 4712 Decl->TypeForDecl = newType; 4713 Types.push_back(newType); 4714 return QualType(newType, 0); 4715 } 4716 4717 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4718 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4719 4720 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4721 if (PrevDecl->TypeForDecl) 4722 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4723 4724 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl); 4725 Decl->TypeForDecl = newType; 4726 Types.push_back(newType); 4727 return QualType(newType, 0); 4728 } 4729 4730 QualType ASTContext::getUnresolvedUsingType( 4731 const UnresolvedUsingTypenameDecl *Decl) const { 4732 if (Decl->TypeForDecl) 4733 return QualType(Decl->TypeForDecl, 0); 4734 4735 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4736 Decl->getCanonicalDecl()) 4737 if (CanonicalDecl->TypeForDecl) 4738 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4739 4740 Type *newType = 4741 new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl); 4742 Decl->TypeForDecl = newType; 4743 Types.push_back(newType); 4744 return QualType(newType, 0); 4745 } 4746 4747 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4748 QualType modifiedType, 4749 QualType equivalentType) const { 4750 llvm::FoldingSetNodeID id; 4751 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4752 4753 void *insertPos = nullptr; 4754 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4755 if (type) return QualType(type, 0); 4756 4757 QualType canon = getCanonicalType(equivalentType); 4758 type = new (*this, alignof(AttributedType)) 4759 AttributedType(canon, attrKind, modifiedType, equivalentType); 4760 4761 Types.push_back(type); 4762 AttributedTypes.InsertNode(type, insertPos); 4763 4764 return QualType(type, 0); 4765 } 4766 4767 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4768 QualType Wrapped) { 4769 llvm::FoldingSetNodeID ID; 4770 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4771 4772 void *InsertPos = nullptr; 4773 BTFTagAttributedType *Ty = 4774 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4775 if (Ty) 4776 return QualType(Ty, 0); 4777 4778 QualType Canon = getCanonicalType(Wrapped); 4779 Ty = new (*this, alignof(BTFTagAttributedType)) 4780 BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4781 4782 Types.push_back(Ty); 4783 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4784 4785 return QualType(Ty, 0); 4786 } 4787 4788 /// Retrieve a substitution-result type. 
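///
/// An illustrative sketch (made-up names; not the only producer of this
/// sugar):
/// \code
///   template <typename T> void f(T value);
///   void g() { f(42); }   // instantiation replaces 'T' with 'int'
/// \endcode
/// The replacement type 'int' is wrapped in a SubstTemplateTypeParmType so
/// the AST remembers which parameter (AssociatedDecl, Index) was substituted.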
QualType ASTContext::getSubstTemplateTypeParmType(
    QualType Replacement, Decl *AssociatedDecl, unsigned Index,
    std::optional<unsigned> PackIndex) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
                                     PackIndex);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm =
      SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
                             !Replacement.isCanonical()),
                         alignof(SubstTemplateTypeParmType));
    SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
                                                    Index, PackIndex);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}

/// Retrieve a substitution-result type for a substituted template type
/// parameter pack.
QualType
ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
                                             unsigned Index, bool Final,
                                             const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements())
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
                                         ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  {
    TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
    if (!AssociatedDecl->isCanonicalDecl() ||
        !CanonArgPack.structurallyEquals(ArgPack)) {
      Canon = getSubstTemplateTypeParmPackType(
          AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack);
      [[maybe_unused]] const auto *Nothing =
          SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!Nothing);
    }
  }

  auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
      SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
                                    ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}

/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
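///
/// For example (illustrative, made-up names), given
/// \code
///   template <typename T, typename... Ts> struct Tuple;
/// \endcode
/// 'T' names the parameter type at depth 0, index 0, and 'Ts' the
/// parameter-pack type at depth 0, index 1.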
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(TTPDecl, Canon);

    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}

TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                              const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST =
      getTemplateSpecializationType(Name, Args.arguments(), Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgumentLoc> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args)
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}

#ifndef NDEBUG
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  return false;
}
#endif

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = QTN->getUnderlyingTemplate();

  const auto *TD = Template.getAsTemplateDecl();
  bool IsTypeAlias = TD && TD->isTypeAlias();
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
4950 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4951 "Caller must compute aliased type"); 4952 IsTypeAlias = false; 4953 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4954 } 4955 4956 // Allocate the (non-canonical) template specialization type, but don't 4957 // try to unique it: these types typically have location information that 4958 // we don't unique and don't want to lose. 4959 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4960 sizeof(TemplateArgument) * Args.size() + 4961 (IsTypeAlias ? sizeof(QualType) : 0), 4962 alignof(TemplateSpecializationType)); 4963 auto *Spec 4964 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4965 IsTypeAlias ? Underlying : QualType()); 4966 4967 Types.push_back(Spec); 4968 return QualType(Spec, 0); 4969 } 4970 4971 QualType ASTContext::getCanonicalTemplateSpecializationType( 4972 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4973 assert(!Template.getAsDependentTemplateName() && 4974 "No dependent template names here!"); 4975 4976 // Look through qualified template names. 4977 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4978 Template = TemplateName(QTN->getUnderlyingTemplate()); 4979 4980 // Build the canonical template specialization type. 4981 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4982 bool AnyNonCanonArgs = false; 4983 auto CanonArgs = 4984 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 4985 4986 // Determine whether this canonical template specialization type already 4987 // exists. 4988 llvm::FoldingSetNodeID ID; 4989 TemplateSpecializationType::Profile(ID, CanonTemplate, 4990 CanonArgs, *this); 4991 4992 void *InsertPos = nullptr; 4993 TemplateSpecializationType *Spec 4994 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4995 4996 if (!Spec) { 4997 // Allocate a new canonical template specialization type. 
4998 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4999 sizeof(TemplateArgument) * CanonArgs.size()), 5000 alignof(TemplateSpecializationType)); 5001 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 5002 CanonArgs, 5003 QualType(), QualType()); 5004 Types.push_back(Spec); 5005 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 5006 } 5007 5008 assert(Spec->isDependentType() && 5009 "Non-dependent template-id type must have a canonical type"); 5010 return QualType(Spec, 0); 5011 } 5012 5013 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5014 NestedNameSpecifier *NNS, 5015 QualType NamedType, 5016 TagDecl *OwnedTagDecl) const { 5017 llvm::FoldingSetNodeID ID; 5018 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5019 5020 void *InsertPos = nullptr; 5021 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5022 if (T) 5023 return QualType(T, 0); 5024 5025 QualType Canon = NamedType; 5026 if (!Canon.isCanonical()) { 5027 Canon = getCanonicalType(NamedType); 5028 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5029 assert(!CheckT && "Elaborated canonical type broken"); 5030 (void)CheckT; 5031 } 5032 5033 void *Mem = 5034 Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5035 alignof(ElaboratedType)); 5036 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5037 5038 Types.push_back(T); 5039 ElaboratedTypes.InsertNode(T, InsertPos); 5040 return QualType(T, 0); 5041 } 5042 5043 QualType 5044 ASTContext::getParenType(QualType InnerType) const { 5045 llvm::FoldingSetNodeID ID; 5046 ParenType::Profile(ID, InnerType); 5047 5048 void *InsertPos = nullptr; 5049 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5050 if (T) 5051 return QualType(T, 0); 5052 5053 QualType Canon = InnerType; 5054 if (!Canon.isCanonical()) { 5055 Canon = getCanonicalType(InnerType); 5056 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5057 assert(!CheckT && "Paren canonical type broken"); 5058 (void)CheckT; 5059 } 5060 5061 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon); 5062 Types.push_back(T); 5063 ParenTypes.InsertNode(T, InsertPos); 5064 return QualType(T, 0); 5065 } 5066 5067 QualType 5068 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5069 const IdentifierInfo *MacroII) const { 5070 QualType Canon = UnderlyingTy; 5071 if (!Canon.isCanonical()) 5072 Canon = getCanonicalType(UnderlyingTy); 5073 5074 auto *newType = new (*this, alignof(MacroQualifiedType)) 5075 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5076 Types.push_back(newType); 5077 return QualType(newType, 0); 5078 } 5079 5080 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5081 NestedNameSpecifier *NNS, 5082 const IdentifierInfo *Name, 5083 QualType Canon) const { 5084 if (Canon.isNull()) { 5085 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5086 if (CanonNNS != NNS) 5087 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5088 } 5089 5090 llvm::FoldingSetNodeID ID; 5091 DependentNameType::Profile(ID, Keyword, NNS, Name); 5092 5093 void *InsertPos = nullptr; 5094 DependentNameType *T 5095 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5096 if (T) 5097 return QualType(T, 0); 5098 5099 T = new (*this, alignof(DependentNameType)) 5100 DependentNameType(Keyword, NNS, Name, Canon); 5101 Types.push_back(T); 5102 DependentNameTypes.InsertNode(T, InsertPos); 5103 return QualType(T, 0); 
5104 } 5105 5106 QualType ASTContext::getDependentTemplateSpecializationType( 5107 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5108 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5109 // TODO: avoid this copy 5110 SmallVector<TemplateArgument, 16> ArgCopy; 5111 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5112 ArgCopy.push_back(Args[I].getArgument()); 5113 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5114 } 5115 5116 QualType 5117 ASTContext::getDependentTemplateSpecializationType( 5118 ElaboratedTypeKeyword Keyword, 5119 NestedNameSpecifier *NNS, 5120 const IdentifierInfo *Name, 5121 ArrayRef<TemplateArgument> Args) const { 5122 assert((!NNS || NNS->isDependent()) && 5123 "nested-name-specifier must be dependent"); 5124 5125 llvm::FoldingSetNodeID ID; 5126 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5127 Name, Args); 5128 5129 void *InsertPos = nullptr; 5130 DependentTemplateSpecializationType *T 5131 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5132 if (T) 5133 return QualType(T, 0); 5134 5135 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5136 5137 ElaboratedTypeKeyword CanonKeyword = Keyword; 5138 if (Keyword == ElaboratedTypeKeyword::None) 5139 CanonKeyword = ElaboratedTypeKeyword::Typename; 5140 5141 bool AnyNonCanonArgs = false; 5142 auto CanonArgs = 5143 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5144 5145 QualType Canon; 5146 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5147 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5148 Name, 5149 CanonArgs); 5150 5151 // Find the insert position again. 5152 [[maybe_unused]] auto *Nothing = 5153 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5154 assert(!Nothing && "canonical type broken"); 5155 } 5156 5157 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5158 sizeof(TemplateArgument) * Args.size()), 5159 alignof(DependentTemplateSpecializationType)); 5160 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5161 Name, Args, Canon); 5162 Types.push_back(T); 5163 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5164 return QualType(T, 0); 5165 } 5166 5167 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5168 TemplateArgument Arg; 5169 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5170 QualType ArgType = getTypeDeclType(TTP); 5171 if (TTP->isParameterPack()) 5172 ArgType = getPackExpansionType(ArgType, std::nullopt); 5173 5174 Arg = TemplateArgument(ArgType); 5175 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5176 QualType T = 5177 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5178 // For class NTTPs, ensure we include the 'const' so the type matches that 5179 // of a real template argument. 5180 // FIXME: It would be more faithful to model this as something like an 5181 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
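    // Illustrative sketch (made-up names) of the class-type NTTP case handled
    // below:
    //   struct Limit { int value; };
    //   template <Limit L> struct Gate;   // the injected argument for 'L' is
    //                                     // given type 'const Limit'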
5182 if (T->isRecordType()) 5183 T.addConst(); 5184 Expr *E = new (*this) DeclRefExpr( 5185 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5186 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5187 5188 if (NTTP->isParameterPack()) 5189 E = new (*this) 5190 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5191 Arg = TemplateArgument(E); 5192 } else { 5193 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5194 if (TTP->isParameterPack()) 5195 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); 5196 else 5197 Arg = TemplateArgument(TemplateName(TTP)); 5198 } 5199 5200 if (Param->isTemplateParameterPack()) 5201 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5202 5203 return Arg; 5204 } 5205 5206 void 5207 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5208 SmallVectorImpl<TemplateArgument> &Args) { 5209 Args.reserve(Args.size() + Params->size()); 5210 5211 for (NamedDecl *Param : *Params) 5212 Args.push_back(getInjectedTemplateArg(Param)); 5213 } 5214 5215 QualType ASTContext::getPackExpansionType(QualType Pattern, 5216 std::optional<unsigned> NumExpansions, 5217 bool ExpectPackInType) { 5218 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5219 "Pack expansions must expand one or more parameter packs"); 5220 5221 llvm::FoldingSetNodeID ID; 5222 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5223 5224 void *InsertPos = nullptr; 5225 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5226 if (T) 5227 return QualType(T, 0); 5228 5229 QualType Canon; 5230 if (!Pattern.isCanonical()) { 5231 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5232 /*ExpectPackInType=*/false); 5233 5234 // Find the insert position again, in case we inserted an element into 5235 // PackExpansionTypes and invalidated our insert position. 5236 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5237 } 5238 5239 T = new (*this, alignof(PackExpansionType)) 5240 PackExpansionType(Pattern, Canon, NumExpansions); 5241 Types.push_back(T); 5242 PackExpansionTypes.InsertNode(T, InsertPos); 5243 return QualType(T, 0); 5244 } 5245 5246 /// CmpProtocolNames - Comparison predicate for sorting protocols 5247 /// alphabetically. 5248 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5249 ObjCProtocolDecl *const *RHS) { 5250 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5251 } 5252 5253 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5254 if (Protocols.empty()) return true; 5255 5256 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5257 return false; 5258 5259 for (unsigned i = 1; i != Protocols.size(); ++i) 5260 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5261 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5262 return false; 5263 return true; 5264 } 5265 5266 static void 5267 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5268 // Sort protocols, keyed by name. 5269 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5270 5271 // Canonicalize. 5272 for (ObjCProtocolDecl *&P : Protocols) 5273 P = P->getCanonicalDecl(); 5274 5275 // Remove duplicates. 
5276 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5277 Protocols.erase(ProtocolsEnd, Protocols.end()); 5278 } 5279 5280 QualType ASTContext::getObjCObjectType(QualType BaseType, 5281 ObjCProtocolDecl * const *Protocols, 5282 unsigned NumProtocols) const { 5283 return getObjCObjectType(BaseType, {}, 5284 llvm::ArrayRef(Protocols, NumProtocols), 5285 /*isKindOf=*/false); 5286 } 5287 5288 QualType ASTContext::getObjCObjectType( 5289 QualType baseType, 5290 ArrayRef<QualType> typeArgs, 5291 ArrayRef<ObjCProtocolDecl *> protocols, 5292 bool isKindOf) const { 5293 // If the base type is an interface and there aren't any protocols or 5294 // type arguments to add, then the interface type will do just fine. 5295 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5296 isa<ObjCInterfaceType>(baseType)) 5297 return baseType; 5298 5299 // Look in the folding set for an existing type. 5300 llvm::FoldingSetNodeID ID; 5301 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5302 void *InsertPos = nullptr; 5303 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5304 return QualType(QT, 0); 5305 5306 // Determine the type arguments to be used for canonicalization, 5307 // which may be explicitly specified here or written on the base 5308 // type. 5309 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5310 if (effectiveTypeArgs.empty()) { 5311 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5312 effectiveTypeArgs = baseObject->getTypeArgs(); 5313 } 5314 5315 // Build the canonical type, which has the canonical base type and a 5316 // sorted-and-uniqued list of protocols and the type arguments 5317 // canonicalized. 5318 QualType canonical; 5319 bool typeArgsAreCanonical = llvm::all_of( 5320 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5321 bool protocolsSorted = areSortedAndUniqued(protocols); 5322 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5323 // Determine the canonical type arguments. 5324 ArrayRef<QualType> canonTypeArgs; 5325 SmallVector<QualType, 4> canonTypeArgsVec; 5326 if (!typeArgsAreCanonical) { 5327 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5328 for (auto typeArg : effectiveTypeArgs) 5329 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5330 canonTypeArgs = canonTypeArgsVec; 5331 } else { 5332 canonTypeArgs = effectiveTypeArgs; 5333 } 5334 5335 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5336 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5337 if (!protocolsSorted) { 5338 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5339 SortAndUniqueProtocols(canonProtocolsVec); 5340 canonProtocols = canonProtocolsVec; 5341 } else { 5342 canonProtocols = protocols; 5343 } 5344 5345 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5346 canonProtocols, isKindOf); 5347 5348 // Regenerate InsertPos. 5349 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5350 } 5351 5352 unsigned size = sizeof(ObjCObjectTypeImpl); 5353 size += typeArgs.size() * sizeof(QualType); 5354 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5355 void *mem = Allocate(size, alignof(ObjCObjectTypeImpl)); 5356 auto *T = 5357 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5358 isKindOf); 5359 5360 Types.push_back(T); 5361 ObjCObjectTypes.InsertNode(T, InsertPos); 5362 return QualType(T, 0); 5363 } 5364 5365 /// Apply Objective-C protocol qualifiers to the given type. 
5366 /// If this is for the canonical type of a type parameter, we can apply 5367 /// protocol qualifiers on the ObjCObjectPointerType. 5368 QualType 5369 ASTContext::applyObjCProtocolQualifiers(QualType type, 5370 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5371 bool allowOnPointerType) const { 5372 hasError = false; 5373 5374 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5375 return getObjCTypeParamType(objT->getDecl(), protocols); 5376 } 5377 5378 // Apply protocol qualifiers to ObjCObjectPointerType. 5379 if (allowOnPointerType) { 5380 if (const auto *objPtr = 5381 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5382 const ObjCObjectType *objT = objPtr->getObjectType(); 5383 // Merge protocol lists and construct ObjCObjectType. 5384 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5385 protocolsVec.append(objT->qual_begin(), 5386 objT->qual_end()); 5387 protocolsVec.append(protocols.begin(), protocols.end()); 5388 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5389 type = getObjCObjectType( 5390 objT->getBaseType(), 5391 objT->getTypeArgsAsWritten(), 5392 protocols, 5393 objT->isKindOfTypeAsWritten()); 5394 return getObjCObjectPointerType(type); 5395 } 5396 } 5397 5398 // Apply protocol qualifiers to ObjCObjectType. 5399 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5400 // FIXME: Check for protocols to which the class type is already 5401 // known to conform. 5402 5403 return getObjCObjectType(objT->getBaseType(), 5404 objT->getTypeArgsAsWritten(), 5405 protocols, 5406 objT->isKindOfTypeAsWritten()); 5407 } 5408 5409 // If the canonical type is ObjCObjectType, ... 5410 if (type->isObjCObjectType()) { 5411 // Silently overwrite any existing protocol qualifiers. 5412 // TODO: determine whether that's the right thing to do. 5413 5414 // FIXME: Check for protocols to which the class type is already 5415 // known to conform. 5416 return getObjCObjectType(type, {}, protocols, false); 5417 } 5418 5419 // id<protocol-list> 5420 if (type->isObjCIdType()) { 5421 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5422 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5423 objPtr->isKindOfType()); 5424 return getObjCObjectPointerType(type); 5425 } 5426 5427 // Class<protocol-list> 5428 if (type->isObjCClassType()) { 5429 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5430 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5431 objPtr->isKindOfType()); 5432 return getObjCObjectPointerType(type); 5433 } 5434 5435 hasError = true; 5436 return type; 5437 } 5438 5439 QualType 5440 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5441 ArrayRef<ObjCProtocolDecl *> protocols) const { 5442 // Look in the folding set for an existing type. 5443 llvm::FoldingSetNodeID ID; 5444 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5445 void *InsertPos = nullptr; 5446 if (ObjCTypeParamType *TypeParam = 5447 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5448 return QualType(TypeParam, 0); 5449 5450 // We canonicalize to the underlying type. 5451 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5452 if (!protocols.empty()) { 5453 // Apply the protocol qualifers. 
5454 bool hasError; 5455 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5456 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5457 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5458 } 5459 5460 unsigned size = sizeof(ObjCTypeParamType); 5461 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5462 void *mem = Allocate(size, alignof(ObjCTypeParamType)); 5463 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5464 5465 Types.push_back(newType); 5466 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5467 return QualType(newType, 0); 5468 } 5469 5470 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5471 ObjCTypeParamDecl *New) const { 5472 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5473 // Update TypeForDecl after updating TypeSourceInfo. 5474 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5475 SmallVector<ObjCProtocolDecl *, 8> protocols; 5476 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5477 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5478 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5479 } 5480 5481 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5482 /// protocol list adopt all protocols in QT's qualified-id protocol 5483 /// list. 5484 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5485 ObjCInterfaceDecl *IC) { 5486 if (!QT->isObjCQualifiedIdType()) 5487 return false; 5488 5489 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5490 // If both the right and left sides have qualifiers. 5491 for (auto *Proto : OPT->quals()) { 5492 if (!IC->ClassImplementsProtocol(Proto, false)) 5493 return false; 5494 } 5495 return true; 5496 } 5497 return false; 5498 } 5499 5500 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5501 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5502 /// of protocols. 5503 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5504 ObjCInterfaceDecl *IDecl) { 5505 if (!QT->isObjCQualifiedIdType()) 5506 return false; 5507 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5508 if (!OPT) 5509 return false; 5510 if (!IDecl->hasDefinition()) 5511 return false; 5512 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5513 CollectInheritedProtocols(IDecl, InheritedProtocols); 5514 if (InheritedProtocols.empty()) 5515 return false; 5516 // Check that if every protocol in list of id<plist> conforms to a protocol 5517 // of IDecl's, then bridge casting is ok. 5518 bool Conforms = false; 5519 for (auto *Proto : OPT->quals()) { 5520 Conforms = false; 5521 for (auto *PI : InheritedProtocols) { 5522 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5523 Conforms = true; 5524 break; 5525 } 5526 } 5527 if (!Conforms) 5528 break; 5529 } 5530 if (Conforms) 5531 return true; 5532 5533 for (auto *PI : InheritedProtocols) { 5534 // If both the right and left sides have qualifiers. 5535 bool Adopts = false; 5536 for (auto *Proto : OPT->quals()) { 5537 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5538 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5539 break; 5540 } 5541 if (!Adopts) 5542 return false; 5543 } 5544 return true; 5545 } 5546 5547 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5548 /// the given object type. 
5549 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5550 llvm::FoldingSetNodeID ID; 5551 ObjCObjectPointerType::Profile(ID, ObjectT); 5552 5553 void *InsertPos = nullptr; 5554 if (ObjCObjectPointerType *QT = 5555 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5556 return QualType(QT, 0); 5557 5558 // Find the canonical object type. 5559 QualType Canonical; 5560 if (!ObjectT.isCanonical()) { 5561 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5562 5563 // Regenerate InsertPos. 5564 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5565 } 5566 5567 // No match. 5568 void *Mem = 5569 Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType)); 5570 auto *QType = 5571 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5572 5573 Types.push_back(QType); 5574 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5575 return QualType(QType, 0); 5576 } 5577 5578 /// getObjCInterfaceType - Return the unique reference to the type for the 5579 /// specified ObjC interface decl. The list of protocols is optional. 5580 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5581 ObjCInterfaceDecl *PrevDecl) const { 5582 if (Decl->TypeForDecl) 5583 return QualType(Decl->TypeForDecl, 0); 5584 5585 if (PrevDecl) { 5586 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5587 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5588 return QualType(PrevDecl->TypeForDecl, 0); 5589 } 5590 5591 // Prefer the definition, if there is one. 5592 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5593 Decl = Def; 5594 5595 void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType)); 5596 auto *T = new (Mem) ObjCInterfaceType(Decl); 5597 Decl->TypeForDecl = T; 5598 Types.push_back(T); 5599 return QualType(T, 0); 5600 } 5601 5602 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5603 /// TypeOfExprType AST's (since expression's are never shared). For example, 5604 /// multiple declarations that refer to "typeof(x)" all contain different 5605 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5606 /// on canonical type's (which are always unique). 5607 QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { 5608 TypeOfExprType *toe; 5609 if (tofExpr->isTypeDependent()) { 5610 llvm::FoldingSetNodeID ID; 5611 DependentTypeOfExprType::Profile(ID, *this, tofExpr, 5612 Kind == TypeOfKind::Unqualified); 5613 5614 void *InsertPos = nullptr; 5615 DependentTypeOfExprType *Canon = 5616 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5617 if (Canon) { 5618 // We already have a "canonical" version of an identical, dependent 5619 // typeof(expr) type. Use that as our canonical type. 5620 toe = new (*this, alignof(TypeOfExprType)) 5621 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); 5622 } else { 5623 // Build a new, canonical typeof(expr) type. 5624 Canon = new (*this, alignof(DependentTypeOfExprType)) 5625 DependentTypeOfExprType(tofExpr, Kind); 5626 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5627 toe = Canon; 5628 } 5629 } else { 5630 QualType Canonical = getCanonicalType(tofExpr->getType()); 5631 toe = new (*this, alignof(TypeOfExprType)) 5632 TypeOfExprType(tofExpr, Kind, Canonical); 5633 } 5634 Types.push_back(toe); 5635 return QualType(toe, 0); 5636 } 5637 5638 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5639 /// TypeOfType nodes. 
The only motivation to unique these nodes would be 5640 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5641 /// an issue. This doesn't affect the type checker, since it operates 5642 /// on canonical types (which are always unique). 5643 QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { 5644 QualType Canonical = getCanonicalType(tofType); 5645 auto *tot = 5646 new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind); 5647 Types.push_back(tot); 5648 return QualType(tot, 0); 5649 } 5650 5651 /// getReferenceQualifiedType - Given an expr, will return the type for 5652 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5653 /// and class member access into account. 5654 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5655 // C++11 [dcl.type.simple]p4: 5656 // [...] 5657 QualType T = E->getType(); 5658 switch (E->getValueKind()) { 5659 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5660 // type of e; 5661 case VK_XValue: 5662 return getRValueReferenceType(T); 5663 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5664 // type of e; 5665 case VK_LValue: 5666 return getLValueReferenceType(T); 5667 // - otherwise, decltype(e) is the type of e. 5668 case VK_PRValue: 5669 return T; 5670 } 5671 llvm_unreachable("Unknown value kind"); 5672 } 5673 5674 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5675 /// nodes. This would never be helpful, since each such type has its own 5676 /// expression, and would not give a significant memory saving, since there 5677 /// is an Expr tree under each such type. 5678 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5679 DecltypeType *dt; 5680 5681 // C++11 [temp.type]p2: 5682 // If an expression e involves a template parameter, decltype(e) denotes a 5683 // unique dependent type. Two such decltype-specifiers refer to the same 5684 // type only if their expressions are equivalent (14.5.6.1). 5685 if (e->isInstantiationDependent()) { 5686 llvm::FoldingSetNodeID ID; 5687 DependentDecltypeType::Profile(ID, *this, e); 5688 5689 void *InsertPos = nullptr; 5690 DependentDecltypeType *Canon 5691 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5692 if (!Canon) { 5693 // Build a new, canonical decltype(expr) type. 5694 Canon = new (*this, alignof(DependentDecltypeType)) 5695 DependentDecltypeType(e, DependentTy); 5696 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5697 } 5698 dt = new (*this, alignof(DecltypeType)) 5699 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5700 } else { 5701 dt = new (*this, alignof(DecltypeType)) 5702 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5703 } 5704 Types.push_back(dt); 5705 return QualType(dt, 0); 5706 } 5707 5708 /// getUnaryTransformationType - We don't unique these, since the memory 5709 /// savings are minimal and these are rare. 5710 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5711 QualType UnderlyingType, 5712 UnaryTransformType::UTTKind Kind) 5713 const { 5714 UnaryTransformType *ut = nullptr; 5715 5716 if (BaseType->isDependentType()) { 5717 // Look in the folding set for an existing type. 
5718 llvm::FoldingSetNodeID ID; 5719 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5720 5721 void *InsertPos = nullptr; 5722 DependentUnaryTransformType *Canon 5723 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5724 5725 if (!Canon) { 5726 // Build a new, canonical __underlying_type(type) type. 5727 Canon = new (*this, alignof(DependentUnaryTransformType)) 5728 DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind); 5729 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5730 } 5731 ut = new (*this, alignof(UnaryTransformType)) 5732 UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0)); 5733 } else { 5734 QualType CanonType = getCanonicalType(UnderlyingType); 5735 ut = new (*this, alignof(UnaryTransformType)) 5736 UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType); 5737 } 5738 Types.push_back(ut); 5739 return QualType(ut, 0); 5740 } 5741 5742 QualType ASTContext::getAutoTypeInternal( 5743 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5744 bool IsPack, ConceptDecl *TypeConstraintConcept, 5745 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5746 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5747 !TypeConstraintConcept && !IsDependent) 5748 return getAutoDeductType(); 5749 5750 // Look in the folding set for an existing type. 5751 void *InsertPos = nullptr; 5752 llvm::FoldingSetNodeID ID; 5753 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5754 TypeConstraintConcept, TypeConstraintArgs); 5755 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5756 return QualType(AT, 0); 5757 5758 QualType Canon; 5759 if (!IsCanon) { 5760 if (!DeducedType.isNull()) { 5761 Canon = DeducedType.getCanonicalType(); 5762 } else if (TypeConstraintConcept) { 5763 bool AnyNonCanonArgs = false; 5764 ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl(); 5765 auto CanonicalConceptArgs = ::getCanonicalTemplateArguments( 5766 *this, TypeConstraintArgs, AnyNonCanonArgs); 5767 if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) { 5768 Canon = 5769 getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5770 CanonicalConcept, CanonicalConceptArgs, true); 5771 // Find the insert position again. 5772 [[maybe_unused]] auto *Nothing = 5773 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5774 assert(!Nothing && "canonical type broken"); 5775 } 5776 } 5777 } 5778 5779 void *Mem = Allocate(sizeof(AutoType) + 5780 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5781 alignof(AutoType)); 5782 auto *AT = new (Mem) AutoType( 5783 DeducedType, Keyword, 5784 (IsDependent ? TypeDependence::DependentInstantiation 5785 : TypeDependence::None) | 5786 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5787 Canon, TypeConstraintConcept, TypeConstraintArgs); 5788 Types.push_back(AT); 5789 AutoTypes.InsertNode(AT, InsertPos); 5790 return QualType(AT, 0); 5791 } 5792 5793 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5794 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5795 /// canonical deduced-but-dependent 'auto' type. 
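///
/// A rough illustration of the three flavours (made-up names):
/// \code
///   auto x = 42;                      // 'auto' deduced to 'int'
///   auto f();                         // undeduced 'auto' return type
///   template <typename T> void g(T t) {
///     auto y = t;                     // dependent: deduced at instantiation
///   }
/// \endcode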
5796 QualType 5797 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5798 bool IsDependent, bool IsPack, 5799 ConceptDecl *TypeConstraintConcept, 5800 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5801 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5802 assert((!IsDependent || DeducedType.isNull()) && 5803 "A dependent auto should be undeduced"); 5804 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5805 TypeConstraintConcept, TypeConstraintArgs); 5806 } 5807 5808 QualType ASTContext::getUnconstrainedType(QualType T) const { 5809 QualType CanonT = T.getCanonicalType(); 5810 5811 // Remove a type-constraint from a top-level auto or decltype(auto). 5812 if (auto *AT = CanonT->getAs<AutoType>()) { 5813 if (!AT->isConstrained()) 5814 return T; 5815 return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false, 5816 AT->containsUnexpandedParameterPack()), 5817 T.getQualifiers()); 5818 } 5819 5820 // FIXME: We only support constrained auto at the top level in the type of a 5821 // non-type template parameter at the moment. Once we lift that restriction, 5822 // we'll need to recursively build types containing auto here. 5823 assert(!CanonT->getContainedAutoType() || 5824 !CanonT->getContainedAutoType()->isConstrained()); 5825 return T; 5826 } 5827 5828 /// Return the uniqued reference to the deduced template specialization type 5829 /// which has been deduced to the given type, or to the canonical undeduced 5830 /// such type, or the canonical deduced-but-dependent such type. 5831 QualType ASTContext::getDeducedTemplateSpecializationType( 5832 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5833 // Look in the folding set for an existing type. 5834 void *InsertPos = nullptr; 5835 llvm::FoldingSetNodeID ID; 5836 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5837 IsDependent); 5838 if (DeducedTemplateSpecializationType *DTST = 5839 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5840 return QualType(DTST, 0); 5841 5842 auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType)) 5843 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5844 llvm::FoldingSetNodeID TempID; 5845 DTST->Profile(TempID); 5846 assert(ID == TempID && "ID does not match"); 5847 Types.push_back(DTST); 5848 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5849 return QualType(DTST, 0); 5850 } 5851 5852 /// getAtomicType - Return the uniqued reference to the atomic type for 5853 /// the given value type. 5854 QualType ASTContext::getAtomicType(QualType T) const { 5855 // Unique pointers, to guarantee there is only one pointer of a particular 5856 // structure. 5857 llvm::FoldingSetNodeID ID; 5858 AtomicType::Profile(ID, T); 5859 5860 void *InsertPos = nullptr; 5861 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5862 return QualType(AT, 0); 5863 5864 // If the atomic value type isn't canonical, this won't be a canonical type 5865 // either, so fill in the canonical type field. 5866 QualType Canonical; 5867 if (!T.isCanonical()) { 5868 Canonical = getAtomicType(getCanonicalType(T)); 5869 5870 // Get the new insert position for the node we care about. 
5871 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5872 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5873 } 5874 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical); 5875 Types.push_back(New); 5876 AtomicTypes.InsertNode(New, InsertPos); 5877 return QualType(New, 0); 5878 } 5879 5880 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5881 QualType ASTContext::getAutoDeductType() const { 5882 if (AutoDeductTy.isNull()) 5883 AutoDeductTy = QualType(new (*this, alignof(AutoType)) 5884 AutoType(QualType(), AutoTypeKeyword::Auto, 5885 TypeDependence::None, QualType(), 5886 /*concept*/ nullptr, /*args*/ {}), 5887 0); 5888 return AutoDeductTy; 5889 } 5890 5891 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5892 QualType ASTContext::getAutoRRefDeductType() const { 5893 if (AutoRRefDeductTy.isNull()) 5894 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5895 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5896 return AutoRRefDeductTy; 5897 } 5898 5899 /// getTagDeclType - Return the unique reference to the type for the 5900 /// specified TagDecl (struct/union/class/enum) decl. 5901 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5902 assert(Decl); 5903 // FIXME: What is the design on getTagDeclType when it requires casting 5904 // away const? mutable? 5905 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5906 } 5907 5908 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5909 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5910 /// needs to agree with the definition in <stddef.h>. 5911 CanQualType ASTContext::getSizeType() const { 5912 return getFromTargetType(Target->getSizeType()); 5913 } 5914 5915 /// Return the unique signed counterpart of the integer type 5916 /// corresponding to size_t. 5917 CanQualType ASTContext::getSignedSizeType() const { 5918 return getFromTargetType(Target->getSignedSizeType()); 5919 } 5920 5921 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5922 CanQualType ASTContext::getIntMaxType() const { 5923 return getFromTargetType(Target->getIntMaxType()); 5924 } 5925 5926 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5927 CanQualType ASTContext::getUIntMaxType() const { 5928 return getFromTargetType(Target->getUIntMaxType()); 5929 } 5930 5931 /// getSignedWCharType - Return the type of "signed wchar_t". 5932 /// Used when in C++, as a GCC extension. 5933 QualType ASTContext::getSignedWCharType() const { 5934 // FIXME: derive from "Target" ? 5935 return WCharTy; 5936 } 5937 5938 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5939 /// Used when in C++, as a GCC extension. 5940 QualType ASTContext::getUnsignedWCharType() const { 5941 // FIXME: derive from "Target" ? 5942 return UnsignedIntTy; 5943 } 5944 5945 QualType ASTContext::getIntPtrType() const { 5946 return getFromTargetType(Target->getIntPtrType()); 5947 } 5948 5949 QualType ASTContext::getUIntPtrType() const { 5950 return getCorrespondingUnsignedType(getIntPtrType()); 5951 } 5952 5953 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5954 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 
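///
/// For example:
/// \code
///   int buf[8];
///   ptrdiff_t n = (buf + 8) - buf;   // pointer subtraction yields ptrdiff_t
/// \endcode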
5955 QualType ASTContext::getPointerDiffType() const { 5956 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 5957 } 5958 5959 /// Return the unique unsigned counterpart of "ptrdiff_t" 5960 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5961 /// in the definition of %tu format specifier. 5962 QualType ASTContext::getUnsignedPointerDiffType() const { 5963 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 5964 } 5965 5966 /// Return the unique type for "pid_t" defined in 5967 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5968 QualType ASTContext::getProcessIDType() const { 5969 return getFromTargetType(Target->getProcessIDType()); 5970 } 5971 5972 //===----------------------------------------------------------------------===// 5973 // Type Operators 5974 //===----------------------------------------------------------------------===// 5975 5976 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5977 // Push qualifiers into arrays, and then discard any remaining 5978 // qualifiers. 5979 T = getCanonicalType(T); 5980 T = getVariableArrayDecayedType(T); 5981 const Type *Ty = T.getTypePtr(); 5982 QualType Result; 5983 if (isa<ArrayType>(Ty)) { 5984 Result = getArrayDecayedType(QualType(Ty,0)); 5985 } else if (isa<FunctionType>(Ty)) { 5986 Result = getPointerType(QualType(Ty, 0)); 5987 } else { 5988 Result = QualType(Ty, 0); 5989 } 5990 5991 return CanQualType::CreateUnsafe(Result); 5992 } 5993 5994 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5995 Qualifiers &quals) { 5996 SplitQualType splitType = type.getSplitUnqualifiedType(); 5997 5998 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5999 // the unqualified desugared type and then drops it on the floor. 6000 // We then have to strip that sugar back off with 6001 // getUnqualifiedDesugaredType(), which is silly. 6002 const auto *AT = 6003 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 6004 6005 // If we don't have an array, just use the results in splitType. 6006 if (!AT) { 6007 quals = splitType.Quals; 6008 return QualType(splitType.Ty, 0); 6009 } 6010 6011 // Otherwise, recurse on the array's element type. 6012 QualType elementType = AT->getElementType(); 6013 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 6014 6015 // If that didn't change the element type, AT has no qualifiers, so we 6016 // can just use the results in splitType. 6017 if (elementType == unqualElementType) { 6018 assert(quals.empty()); // from the recursive call 6019 quals = splitType.Quals; 6020 return QualType(splitType.Ty, 0); 6021 } 6022 6023 // Otherwise, add in the qualifiers from the outermost type, then 6024 // build the type back up. 
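  // Illustrative example: for 'const int[3]' the element recursion above
  // strips the 'const' and the rebuild below returns 'int[3]', with 'const'
  // accumulated into 'quals'.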
6025 quals.addConsistentQualifiers(splitType.Quals); 6026 6027 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 6028 return getConstantArrayType(unqualElementType, CAT->getSize(), 6029 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 6030 } 6031 6032 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 6033 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 6034 } 6035 6036 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 6037 return getVariableArrayType(unqualElementType, 6038 VAT->getSizeExpr(), 6039 VAT->getSizeModifier(), 6040 VAT->getIndexTypeCVRQualifiers(), 6041 VAT->getBracketsRange()); 6042 } 6043 6044 const auto *DSAT = cast<DependentSizedArrayType>(AT); 6045 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 6046 DSAT->getSizeModifier(), 0, 6047 SourceRange()); 6048 } 6049 6050 /// Attempt to unwrap two types that may both be array types with the same bound 6051 /// (or both be array types of unknown bound) for the purpose of comparing the 6052 /// cv-decomposition of two types per C++ [conv.qual]. 6053 /// 6054 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6055 /// C++20 [conv.qual], if permitted by the current language mode. 6056 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 6057 bool AllowPiMismatch) { 6058 while (true) { 6059 auto *AT1 = getAsArrayType(T1); 6060 if (!AT1) 6061 return; 6062 6063 auto *AT2 = getAsArrayType(T2); 6064 if (!AT2) 6065 return; 6066 6067 // If we don't have two array types with the same constant bound nor two 6068 // incomplete array types, we've unwrapped everything we can. 6069 // C++20 also permits one type to be a constant array type and the other 6070 // to be an incomplete array type. 6071 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6072 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6073 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6074 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6075 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6076 isa<IncompleteArrayType>(AT2)))) 6077 return; 6078 } else if (isa<IncompleteArrayType>(AT1)) { 6079 if (!(isa<IncompleteArrayType>(AT2) || 6080 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6081 isa<ConstantArrayType>(AT2)))) 6082 return; 6083 } else { 6084 return; 6085 } 6086 6087 T1 = AT1->getElementType(); 6088 T2 = AT2->getElementType(); 6089 } 6090 } 6091 6092 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 6093 /// 6094 /// If T1 and T2 are both pointer types of the same kind, or both array types 6095 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6096 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6097 /// 6098 /// This function will typically be called in a loop that successively 6099 /// "unwraps" pointer and pointer-to-member types to compare them at each 6100 /// level. 6101 /// 6102 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6103 /// C++20 [conv.qual], if permitted by the current language mode. 6104 /// 6105 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6106 /// pair of types that can't be unwrapped further. 
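///
/// For example (illustrative), comparing
/// \code
///   const int **p;
///   int *const *q;
/// \endcode
/// each call unwraps one pointer level from both types until only the
/// qualified pointee types remain to be compared.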
6107 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6108 bool AllowPiMismatch) { 6109 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6110 6111 const auto *T1PtrType = T1->getAs<PointerType>(); 6112 const auto *T2PtrType = T2->getAs<PointerType>(); 6113 if (T1PtrType && T2PtrType) { 6114 T1 = T1PtrType->getPointeeType(); 6115 T2 = T2PtrType->getPointeeType(); 6116 return true; 6117 } 6118 6119 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6120 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6121 if (T1MPType && T2MPType && 6122 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6123 QualType(T2MPType->getClass(), 0))) { 6124 T1 = T1MPType->getPointeeType(); 6125 T2 = T2MPType->getPointeeType(); 6126 return true; 6127 } 6128 6129 if (getLangOpts().ObjC) { 6130 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6131 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6132 if (T1OPType && T2OPType) { 6133 T1 = T1OPType->getPointeeType(); 6134 T2 = T2OPType->getPointeeType(); 6135 return true; 6136 } 6137 } 6138 6139 // FIXME: Block pointers, too? 6140 6141 return false; 6142 } 6143 6144 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6145 while (true) { 6146 Qualifiers Quals; 6147 T1 = getUnqualifiedArrayType(T1, Quals); 6148 T2 = getUnqualifiedArrayType(T2, Quals); 6149 if (hasSameType(T1, T2)) 6150 return true; 6151 if (!UnwrapSimilarTypes(T1, T2)) 6152 return false; 6153 } 6154 } 6155 6156 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6157 while (true) { 6158 Qualifiers Quals1, Quals2; 6159 T1 = getUnqualifiedArrayType(T1, Quals1); 6160 T2 = getUnqualifiedArrayType(T2, Quals2); 6161 6162 Quals1.removeCVRQualifiers(); 6163 Quals2.removeCVRQualifiers(); 6164 if (Quals1 != Quals2) 6165 return false; 6166 6167 if (hasSameType(T1, T2)) 6168 return true; 6169 6170 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6171 return false; 6172 } 6173 } 6174 6175 DeclarationNameInfo 6176 ASTContext::getNameForTemplate(TemplateName Name, 6177 SourceLocation NameLoc) const { 6178 switch (Name.getKind()) { 6179 case TemplateName::QualifiedTemplate: 6180 case TemplateName::Template: 6181 // DNInfo work in progress: CHECKME: what about DNLoc? 6182 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6183 NameLoc); 6184 6185 case TemplateName::OverloadedTemplate: { 6186 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6187 // DNInfo work in progress: CHECKME: what about DNLoc? 6188 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6189 } 6190 6191 case TemplateName::AssumedTemplate: { 6192 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6193 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6194 } 6195 6196 case TemplateName::DependentTemplate: { 6197 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6198 DeclarationName DName; 6199 if (DTN->isIdentifier()) { 6200 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6201 return DeclarationNameInfo(DName, NameLoc); 6202 } else { 6203 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6204 // DNInfo work in progress: FIXME: source locations? 
6205 DeclarationNameLoc DNLoc = 6206 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6207 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6208 } 6209 } 6210 6211 case TemplateName::SubstTemplateTemplateParm: { 6212 SubstTemplateTemplateParmStorage *subst 6213 = Name.getAsSubstTemplateTemplateParm(); 6214 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6215 NameLoc); 6216 } 6217 6218 case TemplateName::SubstTemplateTemplateParmPack: { 6219 SubstTemplateTemplateParmPackStorage *subst 6220 = Name.getAsSubstTemplateTemplateParmPack(); 6221 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6222 NameLoc); 6223 } 6224 case TemplateName::UsingTemplate: 6225 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6226 NameLoc); 6227 } 6228 6229 llvm_unreachable("bad template name kind!"); 6230 } 6231 6232 TemplateName 6233 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6234 switch (Name.getKind()) { 6235 case TemplateName::UsingTemplate: 6236 case TemplateName::QualifiedTemplate: 6237 case TemplateName::Template: { 6238 TemplateDecl *Template = Name.getAsTemplateDecl(); 6239 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6240 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6241 6242 // The canonical template name is the canonical template declaration. 6243 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6244 } 6245 6246 case TemplateName::OverloadedTemplate: 6247 case TemplateName::AssumedTemplate: 6248 llvm_unreachable("cannot canonicalize unresolved template"); 6249 6250 case TemplateName::DependentTemplate: { 6251 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6252 assert(DTN && "Non-dependent template names must refer to template decls."); 6253 return DTN->CanonicalTemplateName; 6254 } 6255 6256 case TemplateName::SubstTemplateTemplateParm: { 6257 SubstTemplateTemplateParmStorage *subst 6258 = Name.getAsSubstTemplateTemplateParm(); 6259 return getCanonicalTemplateName(subst->getReplacement()); 6260 } 6261 6262 case TemplateName::SubstTemplateTemplateParmPack: { 6263 SubstTemplateTemplateParmPackStorage *subst = 6264 Name.getAsSubstTemplateTemplateParmPack(); 6265 TemplateArgument canonArgPack = 6266 getCanonicalTemplateArgument(subst->getArgumentPack()); 6267 return getSubstTemplateTemplateParmPack( 6268 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(), 6269 subst->getFinal(), subst->getIndex()); 6270 } 6271 } 6272 6273 llvm_unreachable("bad template name!"); 6274 } 6275 6276 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6277 const TemplateName &Y) const { 6278 return getCanonicalTemplateName(X).getAsVoidPointer() == 6279 getCanonicalTemplateName(Y).getAsVoidPointer(); 6280 } 6281 6282 bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { 6283 if (!XCE != !YCE) 6284 return false; 6285 6286 if (!XCE) 6287 return true; 6288 6289 llvm::FoldingSetNodeID XCEID, YCEID; 6290 XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6291 YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6292 return XCEID == YCEID; 6293 } 6294 6295 bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, 6296 const TypeConstraint *YTC) const { 6297 if (!XTC != !YTC) 6298 return false; 6299 6300 if (!XTC) 6301 return true; 6302 6303 auto *NCX = XTC->getNamedConcept(); 6304 auto *NCY = YTC->getNamedConcept(); 6305 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6306 return 
false; 6307 if (XTC->getConceptReference()->hasExplicitTemplateArgs() != 6308 YTC->getConceptReference()->hasExplicitTemplateArgs()) 6309 return false; 6310 if (XTC->getConceptReference()->hasExplicitTemplateArgs()) 6311 if (XTC->getConceptReference() 6312 ->getTemplateArgsAsWritten() 6313 ->NumTemplateArgs != 6314 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs) 6315 return false; 6316 6317 // Compare slowly by profiling. 6318 // 6319 // We cannot compare the profiling results for the template 6320 // args here. Consider the following example in different modules: 6321 // 6322 // template <__integer_like _Tp, C<_Tp> Sentinel> 6323 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const { 6324 // return __t; 6325 // } 6326 // 6327 // When we compare the profiling result for `C<_Tp>` in different 6328 // modules, it will compare the type of `_Tp` in different modules. 6329 // However, the type of `_Tp` naturally refers to a different 6330 // type in each module, so we cannot compare the profiling results 6331 // for the template args directly. 6332 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), 6333 YTC->getImmediatelyDeclaredConstraint()); 6334 } 6335 6336 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6337 const NamedDecl *Y) const { 6338 if (X->getKind() != Y->getKind()) 6339 return false; 6340 6341 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6342 auto *TY = cast<TemplateTypeParmDecl>(Y); 6343 if (TX->isParameterPack() != TY->isParameterPack()) 6344 return false; 6345 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6346 return false; 6347 return isSameTypeConstraint(TX->getTypeConstraint(), 6348 TY->getTypeConstraint()); 6349 } 6350 6351 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6352 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6353 return TX->isParameterPack() == TY->isParameterPack() && 6354 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) && 6355 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(), 6356 TY->getPlaceholderTypeConstraint()); 6357 } 6358 6359 auto *TX = cast<TemplateTemplateParmDecl>(X); 6360 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6361 return TX->isParameterPack() == TY->isParameterPack() && 6362 isSameTemplateParameterList(TX->getTemplateParameters(), 6363 TY->getTemplateParameters()); 6364 } 6365 6366 bool ASTContext::isSameTemplateParameterList( 6367 const TemplateParameterList *X, const TemplateParameterList *Y) const { 6368 if (X->size() != Y->size()) 6369 return false; 6370 6371 for (unsigned I = 0, N = X->size(); I != N; ++I) 6372 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6373 return false; 6374 6375 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); 6376 } 6377 6378 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, 6379 const NamedDecl *Y) const { 6380 // If the type parameter isn't the same already, we don't need to check the 6381 // default argument further.
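  // Illustrative example: for two modules that both declare
  //   template <typename T = int> struct S;
  // the defaults compare equal via hasSameType below; 'T = int' against
  // 'T = long' would not.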
6382 if (!isSameTemplateParameter(X, Y)) 6383 return false; 6384 6385 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { 6386 auto *TTPY = cast<TemplateTypeParmDecl>(Y); 6387 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6388 return false; 6389 6390 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); 6391 } 6392 6393 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6394 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); 6395 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) 6396 return false; 6397 6398 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); 6399 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); 6400 llvm::FoldingSetNodeID XID, YID; 6401 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); 6402 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); 6403 return XID == YID; 6404 } 6405 6406 auto *TTPX = cast<TemplateTemplateParmDecl>(X); 6407 auto *TTPY = cast<TemplateTemplateParmDecl>(Y); 6408 6409 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6410 return false; 6411 6412 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); 6413 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); 6414 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); 6415 } 6416 6417 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6418 if (auto *NS = X->getAsNamespace()) 6419 return NS; 6420 if (auto *NAS = X->getAsNamespaceAlias()) 6421 return NAS->getNamespace(); 6422 return nullptr; 6423 } 6424 6425 static bool isSameQualifier(const NestedNameSpecifier *X, 6426 const NestedNameSpecifier *Y) { 6427 if (auto *NSX = getNamespace(X)) { 6428 auto *NSY = getNamespace(Y); 6429 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6430 return false; 6431 } else if (X->getKind() != Y->getKind()) 6432 return false; 6433 6434 // FIXME: For namespaces and types, we're permitted to check that the entity 6435 // is named via the same tokens. We should probably do so. 6436 switch (X->getKind()) { 6437 case NestedNameSpecifier::Identifier: 6438 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6439 return false; 6440 break; 6441 case NestedNameSpecifier::Namespace: 6442 case NestedNameSpecifier::NamespaceAlias: 6443 // We've already checked that we named the same namespace. 6444 break; 6445 case NestedNameSpecifier::TypeSpec: 6446 case NestedNameSpecifier::TypeSpecWithTemplate: 6447 if (X->getAsType()->getCanonicalTypeInternal() != 6448 Y->getAsType()->getCanonicalTypeInternal()) 6449 return false; 6450 break; 6451 case NestedNameSpecifier::Global: 6452 case NestedNameSpecifier::Super: 6453 return true; 6454 } 6455 6456 // Recurse into earlier portion of NNS, if any. 6457 auto *PX = X->getPrefix(); 6458 auto *PY = Y->getPrefix(); 6459 if (PX && PY) 6460 return isSameQualifier(PX, PY); 6461 return !PX && !PY; 6462 } 6463 6464 /// Determine whether the attributes we can overload on are identical for A and 6465 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6466 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6467 const FunctionDecl *B) { 6468 // Note that pass_object_size attributes are represented in the function's 6469 // ExtParameterInfo, so we don't need to check them here. 
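  // For illustration: the loop below treats
  //   void f(int n) __attribute__((enable_if(n > 0, "")));
  //   void f(int n) __attribute__((enable_if(n >= 1, "")));
  // as different, because the two conditions profile to different
  // FoldingSetNodeIDs even though they are logically equivalent.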
6470 6471 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6472 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6473 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6474 6475 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6476 std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6477 std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6478 6479 // Return false if the number of enable_if attributes is different. 6480 if (!Cand1A || !Cand2A) 6481 return false; 6482 6483 Cand1ID.clear(); 6484 Cand2ID.clear(); 6485 6486 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6487 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6488 6489 // Return false if any of the enable_if expressions of A and B are 6490 // different. 6491 if (Cand1ID != Cand2ID) 6492 return false; 6493 } 6494 return true; 6495 } 6496 6497 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { 6498 // Caution: this function is called by the AST reader during deserialization, 6499 // so it cannot rely on AST invariants being met. Non-trivial accessors 6500 // should be avoided, along with any traversal of redeclaration chains. 6501 6502 if (X == Y) 6503 return true; 6504 6505 if (X->getDeclName() != Y->getDeclName()) 6506 return false; 6507 6508 // Must be in the same context. 6509 // 6510 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6511 // could be two different declarations of the same function. (We will fix the 6512 // semantic DC to refer to the primary definition after merging.) 6513 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6514 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6515 return false; 6516 6517 // Two typedefs refer to the same entity if they have the same underlying 6518 // type. 6519 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6520 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6521 return hasSameType(TypedefX->getUnderlyingType(), 6522 TypedefY->getUnderlyingType()); 6523 6524 // Must have the same kind. 6525 if (X->getKind() != Y->getKind()) 6526 return false; 6527 6528 // Objective-C classes and protocols with the same name always match. 6529 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6530 return true; 6531 6532 if (isa<ClassTemplateSpecializationDecl>(X)) { 6533 // No need to handle these here: we merge them when adding them to the 6534 // template. 6535 return false; 6536 } 6537 6538 // Compatible tags match. 6539 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6540 const auto *TagY = cast<TagDecl>(Y); 6541 return (TagX->getTagKind() == TagY->getTagKind()) || 6542 ((TagX->getTagKind() == TagTypeKind::Struct || 6543 TagX->getTagKind() == TagTypeKind::Class || 6544 TagX->getTagKind() == TagTypeKind::Interface) && 6545 (TagY->getTagKind() == TagTypeKind::Struct || 6546 TagY->getTagKind() == TagTypeKind::Class || 6547 TagY->getTagKind() == TagTypeKind::Interface)); 6548 } 6549 6550 // Functions with the same type and linkage match. 6551 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6552 // functions, etc. 
6553 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) { 6554 const auto *FuncY = cast<FunctionDecl>(Y); 6555 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) { 6556 const auto *CtorY = cast<CXXConstructorDecl>(Y); 6557 if (CtorX->getInheritedConstructor() && 6558 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(), 6559 CtorY->getInheritedConstructor().getConstructor())) 6560 return false; 6561 } 6562 6563 if (FuncX->isMultiVersion() != FuncY->isMultiVersion()) 6564 return false; 6565 6566 // Multiversioned functions with different feature strings are represented 6567 // as separate declarations. 6568 if (FuncX->isMultiVersion()) { 6569 const auto *TAX = FuncX->getAttr<TargetAttr>(); 6570 const auto *TAY = FuncY->getAttr<TargetAttr>(); 6571 assert(TAX && TAY && "Multiversion Function without target attribute"); 6572 6573 if (TAX->getFeaturesStr() != TAY->getFeaturesStr()) 6574 return false; 6575 } 6576 6577 // Per C++20 [temp.over.link]/4, friends in different classes are sometimes 6578 // not the same entity if they are constrained. 6579 if ((FuncX->isMemberLikeConstrainedFriend() || 6580 FuncY->isMemberLikeConstrainedFriend()) && 6581 !FuncX->getLexicalDeclContext()->Equals( 6582 FuncY->getLexicalDeclContext())) { 6583 return false; 6584 } 6585 6586 if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(), 6587 FuncY->getTrailingRequiresClause())) 6588 return false; 6589 6590 auto GetTypeAsWritten = [](const FunctionDecl *FD) { 6591 // Map to the first declaration that we've already merged into this one. 6592 // The TSI of redeclarations might not match (due to calling conventions 6593 // being inherited onto the type but not the TSI), but the TSI type of 6594 // the first declaration of the function should match across modules. 6595 FD = FD->getCanonicalDecl(); 6596 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType() 6597 : FD->getType(); 6598 }; 6599 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY); 6600 if (!hasSameType(XT, YT)) { 6601 // We can get functions with different types on the redecl chain in C++17 6602 // if they have differing exception specifications and at least one of 6603 // the exception specs is unresolved. 6604 auto *XFPT = XT->getAs<FunctionProtoType>(); 6605 auto *YFPT = YT->getAs<FunctionProtoType>(); 6606 if (getLangOpts().CPlusPlus17 && XFPT && YFPT && 6607 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) || 6608 isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) && 6609 hasSameFunctionTypeIgnoringExceptionSpec(XT, YT)) 6610 return true; 6611 return false; 6612 } 6613 6614 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() && 6615 hasSameOverloadableAttrs(FuncX, FuncY); 6616 } 6617 6618 // Variables with the same type and linkage match. 6619 if (const auto *VarX = dyn_cast<VarDecl>(X)) { 6620 const auto *VarY = cast<VarDecl>(Y); 6621 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) { 6622 // During deserialization, we might compare variables before we load 6623 // their types. Assume the types will end up being the same. 6624 if (VarX->getType().isNull() || VarY->getType().isNull()) 6625 return true; 6626 6627 if (hasSameType(VarX->getType(), VarY->getType())) 6628 return true; 6629 6630 // We can get decls with different types on the redecl chain. Eg. 6631 // template <typename T> struct S { static T Var[]; }; // #1 6632 // template <typename T> T S<T>::Var[sizeof(T)]; // #2 6633 // This only happens when completing an incomplete array type.
In this case 6634 // when comparing #1 and #2 we should go through their element type. 6635 const ArrayType *VarXTy = getAsArrayType(VarX->getType()); 6636 const ArrayType *VarYTy = getAsArrayType(VarY->getType()); 6637 if (!VarXTy || !VarYTy) 6638 return false; 6639 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType()) 6640 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType()); 6641 } 6642 return false; 6643 } 6644 6645 // Namespaces with the same name and inlinedness match. 6646 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) { 6647 const auto *NamespaceY = cast<NamespaceDecl>(Y); 6648 return NamespaceX->isInline() == NamespaceY->isInline(); 6649 } 6650 6651 // Identical template names and kinds match if their template parameter lists 6652 // and patterns match. 6653 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) { 6654 const auto *TemplateY = cast<TemplateDecl>(Y); 6655 6656 // ConceptDecl wouldn't be the same if their constraint expression differs. 6657 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) { 6658 const auto *ConceptY = cast<ConceptDecl>(Y); 6659 if (!isSameConstraintExpr(ConceptX->getConstraintExpr(), 6660 ConceptY->getConstraintExpr())) 6661 return false; 6662 } 6663 6664 return isSameEntity(TemplateX->getTemplatedDecl(), 6665 TemplateY->getTemplatedDecl()) && 6666 isSameTemplateParameterList(TemplateX->getTemplateParameters(), 6667 TemplateY->getTemplateParameters()); 6668 } 6669 6670 // Fields with the same name and the same type match. 6671 if (const auto *FDX = dyn_cast<FieldDecl>(X)) { 6672 const auto *FDY = cast<FieldDecl>(Y); 6673 // FIXME: Also check the bitwidth is odr-equivalent, if any. 6674 return hasSameType(FDX->getType(), FDY->getType()); 6675 } 6676 6677 // Indirect fields with the same target field match. 6678 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6679 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6680 return IFDX->getAnonField()->getCanonicalDecl() == 6681 IFDY->getAnonField()->getCanonicalDecl(); 6682 } 6683 6684 // Enumerators with the same name match. 6685 if (isa<EnumConstantDecl>(X)) 6686 // FIXME: Also check the value is odr-equivalent. 6687 return true; 6688 6689 // Using shadow declarations with the same target match. 6690 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6691 const auto *USY = cast<UsingShadowDecl>(Y); 6692 return USX->getTargetDecl() == USY->getTargetDecl(); 6693 } 6694 6695 // Using declarations with the same qualifier match. (We already know that 6696 // the name matches.) 6697 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6698 const auto *UY = cast<UsingDecl>(Y); 6699 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6700 UX->hasTypename() == UY->hasTypename() && 6701 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6702 } 6703 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6704 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6705 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6706 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6707 } 6708 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6709 return isSameQualifier( 6710 UX->getQualifier(), 6711 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6712 } 6713 6714 // Using-pack declarations are only created by instantiation, and match if 6715 // they're instantiated from matching UnresolvedUsing...Decls. 
6716 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6717 return declaresSameEntity( 6718 UX->getInstantiatedFromUsingDecl(), 6719 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6720 } 6721 6722 // Namespace alias definitions with the same target match. 6723 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) { 6724 const auto *NAY = cast<NamespaceAliasDecl>(Y); 6725 return NAX->getNamespace()->Equals(NAY->getNamespace()); 6726 } 6727 6728 return false; 6729 } 6730 6731 TemplateArgument 6732 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6733 switch (Arg.getKind()) { 6734 case TemplateArgument::Null: 6735 return Arg; 6736 6737 case TemplateArgument::Expression: 6738 return Arg; 6739 6740 case TemplateArgument::Declaration: { 6741 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6742 return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()), 6743 Arg.getIsDefaulted()); 6744 } 6745 6746 case TemplateArgument::NullPtr: 6747 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6748 /*isNullPtr*/ true, Arg.getIsDefaulted()); 6749 6750 case TemplateArgument::Template: 6751 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()), 6752 Arg.getIsDefaulted()); 6753 6754 case TemplateArgument::TemplateExpansion: 6755 return TemplateArgument( 6756 getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()), 6757 Arg.getNumTemplateExpansions(), Arg.getIsDefaulted()); 6758 6759 case TemplateArgument::Integral: 6760 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6761 6762 case TemplateArgument::StructuralValue: 6763 return TemplateArgument(*this, 6764 getCanonicalType(Arg.getStructuralValueType()), 6765 Arg.getAsStructuralValue()); 6766 6767 case TemplateArgument::Type: 6768 return TemplateArgument(getCanonicalType(Arg.getAsType()), 6769 /*isNullPtr*/ false, Arg.getIsDefaulted()); 6770 6771 case TemplateArgument::Pack: { 6772 bool AnyNonCanonArgs = false; 6773 auto CanonArgs = ::getCanonicalTemplateArguments( 6774 *this, Arg.pack_elements(), AnyNonCanonArgs); 6775 if (!AnyNonCanonArgs) 6776 return Arg; 6777 return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), 6778 CanonArgs); 6779 } 6780 } 6781 6782 // Silence GCC warning 6783 llvm_unreachable("Unhandled template argument kind"); 6784 } 6785 6786 NestedNameSpecifier * 6787 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6788 if (!NNS) 6789 return nullptr; 6790 6791 switch (NNS->getKind()) { 6792 case NestedNameSpecifier::Identifier: 6793 // Canonicalize the prefix but keep the identifier the same. 6794 return NestedNameSpecifier::Create(*this, 6795 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6796 NNS->getAsIdentifier()); 6797 6798 case NestedNameSpecifier::Namespace: 6799 // A namespace is canonical; build a nested-name-specifier with 6800 // this namespace and no prefix. 6801 return NestedNameSpecifier::Create(*this, nullptr, 6802 NNS->getAsNamespace()->getOriginalNamespace()); 6803 6804 case NestedNameSpecifier::NamespaceAlias: 6805 // A namespace is canonical; build a nested-name-specifier with 6806 // this namespace and no prefix. 6807 return NestedNameSpecifier::Create(*this, nullptr, 6808 NNS->getAsNamespaceAlias()->getNamespace() 6809 ->getOriginalNamespace()); 6810 6811 // The difference between TypeSpec and TypeSpecWithTemplate is that the 6812 // latter will have the 'template' keyword when printed. 
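  // For instance: a specifier written 'std::vector<int>::' is a TypeSpec, while
  // a dependent 'T::template apply<U>::' is a TypeSpecWithTemplate and is
  // printed with the 'template' keyword.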
6813 case NestedNameSpecifier::TypeSpec: 6814 case NestedNameSpecifier::TypeSpecWithTemplate: { 6815 const Type *T = getCanonicalType(NNS->getAsType()); 6816 6817 // If we have some kind of dependent-named type (e.g., "typename T::type"), 6818 // break it apart into its prefix and identifier, then reconstitute those 6819 // as the canonical nested-name-specifier. This is required to canonicalize 6820 // a dependent nested-name-specifier involving typedefs of dependent-name 6821 // types, e.g., 6822 // typedef typename T::type T1; 6823 // typedef typename T1::type T2; 6824 if (const auto *DNT = T->getAs<DependentNameType>()) 6825 return NestedNameSpecifier::Create( 6826 *this, DNT->getQualifier(), 6827 const_cast<IdentifierInfo *>(DNT->getIdentifier())); 6828 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) 6829 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, 6830 const_cast<Type *>(T)); 6831 6832 // TODO: Set 'Template' parameter to true for other template types. 6833 return NestedNameSpecifier::Create(*this, nullptr, false, 6834 const_cast<Type *>(T)); 6835 } 6836 6837 case NestedNameSpecifier::Global: 6838 case NestedNameSpecifier::Super: 6839 // The global specifier and __super specifier are canonical and unique. 6840 return NNS; 6841 } 6842 6843 llvm_unreachable("Invalid NestedNameSpecifier::Kind!"); 6844 } 6845 6846 const ArrayType *ASTContext::getAsArrayType(QualType T) const { 6847 // Handle the non-qualified case efficiently. 6848 if (!T.hasLocalQualifiers()) { 6849 // Handle the common positive case fast. 6850 if (const auto *AT = dyn_cast<ArrayType>(T)) 6851 return AT; 6852 } 6853 6854 // Handle the common negative case fast. 6855 if (!isa<ArrayType>(T.getCanonicalType())) 6856 return nullptr; 6857 6858 // Apply any qualifiers from the array type to the element type. This 6859 // implements C99 6.7.3p8: "If the specification of an array type includes 6860 // any type qualifiers, the element type is so qualified, not the array type." 6861 6862 // If we get here, we either have type qualifiers on the type, or we have 6863 // sugar such as a typedef in the way. If we have type qualifiers on the type 6864 // we must propagate them down into the element type. 6865 6866 SplitQualType split = T.getSplitDesugaredType(); 6867 Qualifiers qs = split.Quals; 6868 6869 // If we have a simple case, just return now. 6870 const auto *ATy = dyn_cast<ArrayType>(split.Ty); 6871 if (!ATy || qs.empty()) 6872 return ATy; 6873 6874 // Otherwise, we have an array and we have qualifiers on it. Push the 6875 // qualifiers into the array element type and return a new array type.
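  // Illustrative sketch: given
  //   typedef int A[5];
  //   const A x;   // x has type 'const A'
  // the desugared split is ('int[5]', 'const'), and we return an array type
  // whose element type is 'const int', matching C99 6.7.3p8.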
6876 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6877 6878 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6879 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6880 CAT->getSizeExpr(), 6881 CAT->getSizeModifier(), 6882 CAT->getIndexTypeCVRQualifiers())); 6883 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6884 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6885 IAT->getSizeModifier(), 6886 IAT->getIndexTypeCVRQualifiers())); 6887 6888 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6889 return cast<ArrayType>( 6890 getDependentSizedArrayType(NewEltTy, 6891 DSAT->getSizeExpr(), 6892 DSAT->getSizeModifier(), 6893 DSAT->getIndexTypeCVRQualifiers(), 6894 DSAT->getBracketsRange())); 6895 6896 const auto *VAT = cast<VariableArrayType>(ATy); 6897 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6898 VAT->getSizeExpr(), 6899 VAT->getSizeModifier(), 6900 VAT->getIndexTypeCVRQualifiers(), 6901 VAT->getBracketsRange())); 6902 } 6903 6904 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6905 if (T->isArrayType() || T->isFunctionType()) 6906 return getDecayedType(T); 6907 return T; 6908 } 6909 6910 QualType ASTContext::getSignatureParameterType(QualType T) const { 6911 T = getVariableArrayDecayedType(T); 6912 T = getAdjustedParameterType(T); 6913 return T.getUnqualifiedType(); 6914 } 6915 6916 QualType ASTContext::getExceptionObjectType(QualType T) const { 6917 // C++ [except.throw]p3: 6918 // A throw-expression initializes a temporary object, called the exception 6919 // object, the type of which is determined by removing any top-level 6920 // cv-qualifiers from the static type of the operand of throw and adjusting 6921 // the type from "array of T" or "function returning T" to "pointer to T" 6922 // or "pointer to function returning T", [...] 6923 T = getVariableArrayDecayedType(T); 6924 if (T->isArrayType() || T->isFunctionType()) 6925 T = getDecayedType(T); 6926 return T.getUnqualifiedType(); 6927 } 6928 6929 /// getArrayDecayedType - Return the properly qualified result of decaying the 6930 /// specified array type to a pointer. This operation is non-trivial when 6931 /// handling typedefs etc. The canonical type of "T" must be an array type, 6932 /// this returns a pointer to a properly qualified element of the array. 6933 /// 6934 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6935 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6936 // Get the element type with 'getAsArrayType' so that we don't lose any 6937 // typedefs in the element type of the array. This also handles propagation 6938 // of type qualifiers from the array type into the element type if present 6939 // (C99 6.7.3p8). 
6940 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6941 assert(PrettyArrayType && "Not an array type!"); 6942 6943 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6944 6945 // int x[restrict 4] -> int *restrict 6946 QualType Result = getQualifiedType(PtrTy, 6947 PrettyArrayType->getIndexTypeQualifiers()); 6948 6949 // int x[_Nullable] -> int * _Nullable 6950 if (auto Nullability = Ty->getNullability()) { 6951 Result = const_cast<ASTContext *>(this)->getAttributedType( 6952 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6953 } 6954 return Result; 6955 } 6956 6957 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6958 return getBaseElementType(array->getElementType()); 6959 } 6960 6961 QualType ASTContext::getBaseElementType(QualType type) const { 6962 Qualifiers qs; 6963 while (true) { 6964 SplitQualType split = type.getSplitDesugaredType(); 6965 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6966 if (!array) break; 6967 6968 type = array->getElementType(); 6969 qs.addConsistentQualifiers(split.Quals); 6970 } 6971 6972 return getQualifiedType(type, qs); 6973 } 6974 6975 /// getConstantArrayElementCount - Returns number of constant array elements. 6976 uint64_t 6977 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6978 uint64_t ElementCount = 1; 6979 do { 6980 ElementCount *= CA->getSize().getZExtValue(); 6981 CA = dyn_cast_or_null<ConstantArrayType>( 6982 CA->getElementType()->getAsArrayTypeUnsafe()); 6983 } while (CA); 6984 return ElementCount; 6985 } 6986 6987 uint64_t ASTContext::getArrayInitLoopExprElementCount( 6988 const ArrayInitLoopExpr *AILE) const { 6989 if (!AILE) 6990 return 0; 6991 6992 uint64_t ElementCount = 1; 6993 6994 do { 6995 ElementCount *= AILE->getArraySize().getZExtValue(); 6996 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr()); 6997 } while (AILE); 6998 6999 return ElementCount; 7000 } 7001 7002 /// getFloatingRank - Return a relative rank for floating point types. 7003 /// This routine will assert if passed a built-in type that isn't a float. 7004 static FloatingRank getFloatingRank(QualType T) { 7005 if (const auto *CT = T->getAs<ComplexType>()) 7006 return getFloatingRank(CT->getElementType()); 7007 7008 switch (T->castAs<BuiltinType>()->getKind()) { 7009 default: llvm_unreachable("getFloatingRank(): not a floating type"); 7010 case BuiltinType::Float16: return Float16Rank; 7011 case BuiltinType::Half: return HalfRank; 7012 case BuiltinType::Float: return FloatRank; 7013 case BuiltinType::Double: return DoubleRank; 7014 case BuiltinType::LongDouble: return LongDoubleRank; 7015 case BuiltinType::Float128: return Float128Rank; 7016 case BuiltinType::BFloat16: return BFloat16Rank; 7017 case BuiltinType::Ibm128: return Ibm128Rank; 7018 } 7019 } 7020 7021 /// getFloatingTypeOrder - Compare the rank of the two specified floating 7022 /// point types, ignoring the domain of the type (i.e. 'double' == 7023 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 7024 /// LHS < RHS, return -1. 
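///
/// For example (not normative): comparing 'float' with 'long double' returns
/// -1, while 'double' compared with '_Complex double' returns 0.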
7025 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 7026 FloatingRank LHSR = getFloatingRank(LHS); 7027 FloatingRank RHSR = getFloatingRank(RHS); 7028 7029 if (LHSR == RHSR) 7030 return 0; 7031 if (LHSR > RHSR) 7032 return 1; 7033 return -1; 7034 } 7035 7036 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 7037 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 7038 return 0; 7039 return getFloatingTypeOrder(LHS, RHS); 7040 } 7041 7042 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 7043 /// routine will assert if passed a built-in type that isn't an integer or enum, 7044 /// or if it is not canonicalized. 7045 unsigned ASTContext::getIntegerRank(const Type *T) const { 7046 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 7047 7048 // Results in this 'losing' to any type of the same size, but winning if 7049 // larger. 7050 if (const auto *EIT = dyn_cast<BitIntType>(T)) 7051 return 0 + (EIT->getNumBits() << 3); 7052 7053 switch (cast<BuiltinType>(T)->getKind()) { 7054 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 7055 case BuiltinType::Bool: 7056 return 1 + (getIntWidth(BoolTy) << 3); 7057 case BuiltinType::Char_S: 7058 case BuiltinType::Char_U: 7059 case BuiltinType::SChar: 7060 case BuiltinType::UChar: 7061 return 2 + (getIntWidth(CharTy) << 3); 7062 case BuiltinType::Short: 7063 case BuiltinType::UShort: 7064 return 3 + (getIntWidth(ShortTy) << 3); 7065 case BuiltinType::Int: 7066 case BuiltinType::UInt: 7067 return 4 + (getIntWidth(IntTy) << 3); 7068 case BuiltinType::Long: 7069 case BuiltinType::ULong: 7070 return 5 + (getIntWidth(LongTy) << 3); 7071 case BuiltinType::LongLong: 7072 case BuiltinType::ULongLong: 7073 return 6 + (getIntWidth(LongLongTy) << 3); 7074 case BuiltinType::Int128: 7075 case BuiltinType::UInt128: 7076 return 7 + (getIntWidth(Int128Ty) << 3); 7077 7078 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of 7079 // their underlying types" [c++20 conv.rank] 7080 case BuiltinType::Char8: 7081 return getIntegerRank(UnsignedCharTy.getTypePtr()); 7082 case BuiltinType::Char16: 7083 return getIntegerRank( 7084 getFromTargetType(Target->getChar16Type()).getTypePtr()); 7085 case BuiltinType::Char32: 7086 return getIntegerRank( 7087 getFromTargetType(Target->getChar32Type()).getTypePtr()); 7088 case BuiltinType::WChar_S: 7089 case BuiltinType::WChar_U: 7090 return getIntegerRank( 7091 getFromTargetType(Target->getWCharType()).getTypePtr()); 7092 } 7093 } 7094 7095 /// Whether this is a promotable bitfield reference according 7096 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 7097 /// 7098 /// \returns the type this bit-field will promote to, or NULL if no 7099 /// promotion occurs. 7100 QualType ASTContext::isPromotableBitField(Expr *E) const { 7101 if (E->isTypeDependent() || E->isValueDependent()) 7102 return {}; 7103 7104 // C++ [conv.prom]p5: 7105 // If the bit-field has an enumerated type, it is treated as any other 7106 // value of that type for promotion purposes. 7107 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 7108 return {}; 7109 7110 // FIXME: We should not do this unless E->refersToBitField() is true. This 7111 // matters in C where getSourceBitField() will find bit-fields for various 7112 // cases where the source expression is not a bit-field designator. 7113 7114 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 
7115 if (!Field) 7116 return {}; 7117 7118 QualType FT = Field->getType(); 7119 7120 uint64_t BitWidth = Field->getBitWidthValue(*this); 7121 uint64_t IntSize = getTypeSize(IntTy); 7122 // C++ [conv.prom]p5: 7123 // A prvalue for an integral bit-field can be converted to a prvalue of type 7124 // int if int can represent all the values of the bit-field; otherwise, it 7125 // can be converted to unsigned int if unsigned int can represent all the 7126 // values of the bit-field. If the bit-field is larger yet, no integral 7127 // promotion applies to it. 7128 // C11 6.3.1.1/2: 7129 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 7130 // If an int can represent all values of the original type (as restricted by 7131 // the width, for a bit-field), the value is converted to an int; otherwise, 7132 // it is converted to an unsigned int. 7133 // 7134 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 7135 // We perform that promotion here to match GCC and C++. 7136 // FIXME: C does not permit promotion of an enum bit-field whose rank is 7137 // greater than that of 'int'. We perform that promotion to match GCC. 7138 if (BitWidth < IntSize) 7139 return IntTy; 7140 7141 if (BitWidth == IntSize) 7142 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 7143 7144 // Bit-fields wider than int are not subject to promotions, and therefore act 7145 // like the base type. GCC has some weird bugs in this area that we 7146 // deliberately do not follow (GCC follows a pre-standard resolution to 7147 // C's DR315 which treats bit-width as being part of the type, and this leaks 7148 // into their semantics in some cases). 7149 return {}; 7150 } 7151 7152 /// getPromotedIntegerType - Returns the type that Promotable will 7153 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 7154 /// integer type. 7155 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7156 assert(!Promotable.isNull()); 7157 assert(isPromotableIntegerType(Promotable)); 7158 if (const auto *ET = Promotable->getAs<EnumType>()) 7159 return ET->getDecl()->getPromotionType(); 7160 7161 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7162 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7163 // (3.9.1) can be converted to a prvalue of the first of the following 7164 // types that can represent all the values of its underlying type: 7165 // int, unsigned int, long int, unsigned long int, long long int, or 7166 // unsigned long long int [...] 7167 // FIXME: Is there some better way to compute this? 7168 if (BT->getKind() == BuiltinType::WChar_S || 7169 BT->getKind() == BuiltinType::WChar_U || 7170 BT->getKind() == BuiltinType::Char8 || 7171 BT->getKind() == BuiltinType::Char16 || 7172 BT->getKind() == BuiltinType::Char32) { 7173 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7174 uint64_t FromSize = getTypeSize(BT); 7175 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7176 LongLongTy, UnsignedLongLongTy }; 7177 for (const auto &PT : PromoteTypes) { 7178 uint64_t ToSize = getTypeSize(PT); 7179 if (FromSize < ToSize || 7180 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) 7181 return PT; 7182 } 7183 llvm_unreachable("char type should fit into long long"); 7184 } 7185 } 7186 7187 // At this point, we should have a signed or unsigned integer type. 
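  // Illustrative sketch: 'short' promotes to 'int' below; 'unsigned short'
  // promotes to 'int' when 'int' is strictly wider, and to 'unsigned int' on
  // targets where the two have the same width.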
7188 if (Promotable->isSignedIntegerType()) 7189 return IntTy; 7190 uint64_t PromotableSize = getIntWidth(Promotable); 7191 uint64_t IntSize = getIntWidth(IntTy); 7192 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 7193 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 7194 } 7195 7196 /// Recurses into pointer/array types until it finds an ObjC retainable 7197 /// type and returns its ownership. 7198 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 7199 while (!T.isNull()) { 7200 if (T.getObjCLifetime() != Qualifiers::OCL_None) 7201 return T.getObjCLifetime(); 7202 if (T->isArrayType()) 7203 T = getBaseElementType(T); 7204 else if (const auto *PT = T->getAs<PointerType>()) 7205 T = PT->getPointeeType(); 7206 else if (const auto *RT = T->getAs<ReferenceType>()) 7207 T = RT->getPointeeType(); 7208 else 7209 break; 7210 } 7211 7212 return Qualifiers::OCL_None; 7213 } 7214 7215 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 7216 // Incomplete enum types are not treated as integer types. 7217 // FIXME: In C++, enum types are never integer types. 7218 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 7219 return ET->getDecl()->getIntegerType().getTypePtr(); 7220 return nullptr; 7221 } 7222 7223 /// getIntegerTypeOrder - Returns the highest ranked integer type: 7224 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 7225 /// LHS < RHS, return -1. 7226 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 7227 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 7228 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 7229 7230 // Unwrap enums to their underlying type. 7231 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 7232 LHSC = getIntegerTypeForEnum(ET); 7233 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 7234 RHSC = getIntegerTypeForEnum(ET); 7235 7236 if (LHSC == RHSC) return 0; 7237 7238 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 7239 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 7240 7241 unsigned LHSRank = getIntegerRank(LHSC); 7242 unsigned RHSRank = getIntegerRank(RHSC); 7243 7244 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 7245 if (LHSRank == RHSRank) return 0; 7246 return LHSRank > RHSRank ? 1 : -1; 7247 } 7248 7249 // Otherwise, the LHS is signed and the RHS is unsigned or vice versa. 7250 if (LHSUnsigned) { 7251 // If the unsigned [LHS] type is larger, return it. 7252 if (LHSRank >= RHSRank) 7253 return 1; 7254 7255 // If the signed type can represent all values of the unsigned type, it 7256 // wins. Because we are dealing with 2's complement and types that are 7257 // powers of two larger than each other, this is always safe. 7258 return -1; 7259 } 7260 7261 // If the unsigned [RHS] type is larger, return it. 7262 if (RHSRank >= LHSRank) 7263 return -1; 7264 7265 // If the signed type can represent all values of the unsigned type, it 7266 // wins. Because we are dealing with 2's complement and types that are 7267 // powers of two larger than each other, this is always safe.
7268 return 1; 7269 } 7270 7271 TypedefDecl *ASTContext::getCFConstantStringDecl() const { 7272 if (CFConstantStringTypeDecl) 7273 return CFConstantStringTypeDecl; 7274 7275 assert(!CFConstantStringTagDecl && 7276 "tag and typedef should be initialized together"); 7277 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); 7278 CFConstantStringTagDecl->startDefinition(); 7279 7280 struct { 7281 QualType Type; 7282 const char *Name; 7283 } Fields[5]; 7284 unsigned Count = 0; 7285 7286 /// Objective-C ABI 7287 /// 7288 /// typedef struct __NSConstantString_tag { 7289 /// const int *isa; 7290 /// int flags; 7291 /// const char *str; 7292 /// long length; 7293 /// } __NSConstantString; 7294 /// 7295 /// Swift ABI (4.1, 4.2) 7296 /// 7297 /// typedef struct __NSConstantString_tag { 7298 /// uintptr_t _cfisa; 7299 /// uintptr_t _swift_rc; 7300 /// _Atomic(uint64_t) _cfinfoa; 7301 /// const char *_ptr; 7302 /// uint32_t _length; 7303 /// } __NSConstantString; 7304 /// 7305 /// Swift ABI (5.0) 7306 /// 7307 /// typedef struct __NSConstantString_tag { 7308 /// uintptr_t _cfisa; 7309 /// uintptr_t _swift_rc; 7310 /// _Atomic(uint64_t) _cfinfoa; 7311 /// const char *_ptr; 7312 /// uintptr_t _length; 7313 /// } __NSConstantString; 7314 7315 const auto CFRuntime = getLangOpts().CFRuntime; 7316 if (static_cast<unsigned>(CFRuntime) < 7317 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { 7318 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; 7319 Fields[Count++] = { IntTy, "flags" }; 7320 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; 7321 Fields[Count++] = { LongTy, "length" }; 7322 } else { 7323 Fields[Count++] = { getUIntPtrType(), "_cfisa" }; 7324 Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; 7325 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_cfinfoa" }; 7326 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; 7327 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || 7328 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) 7329 Fields[Count++] = { IntTy, "_length" }; 7330 else 7331 Fields[Count++] = { getUIntPtrType(), "_length" }; 7332 } 7333 7334 // Create fields 7335 for (unsigned i = 0; i < Count; ++i) { 7336 FieldDecl *Field = 7337 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), 7338 SourceLocation(), &Idents.get(Fields[i].Name), 7339 Fields[i].Type, /*TInfo=*/nullptr, 7340 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7341 Field->setAccess(AS_public); 7342 CFConstantStringTagDecl->addDecl(Field); 7343 } 7344 7345 CFConstantStringTagDecl->completeDefinition(); 7346 // This type is designed to be compatible with NSConstantString, but cannot 7347 // use the same name, since NSConstantString is an interface. 7348 auto tagType = getTagDeclType(CFConstantStringTagDecl); 7349 CFConstantStringTypeDecl = 7350 buildImplicitTypedef(tagType, "__NSConstantString"); 7351 7352 return CFConstantStringTypeDecl; 7353 } 7354 7355 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 7356 if (!CFConstantStringTagDecl) 7357 getCFConstantStringDecl(); // Build the tag and the typedef. 7358 return CFConstantStringTagDecl; 7359 } 7360 7361 // getCFConstantStringType - Return the type used for constant CFStrings.
7362 QualType ASTContext::getCFConstantStringType() const { 7363 return getTypedefType(getCFConstantStringDecl()); 7364 } 7365 7366 QualType ASTContext::getObjCSuperType() const { 7367 if (ObjCSuperType.isNull()) { 7368 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7369 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7370 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7371 } 7372 return ObjCSuperType; 7373 } 7374 7375 void ASTContext::setCFConstantStringType(QualType T) { 7376 const auto *TD = T->castAs<TypedefType>(); 7377 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7378 const auto *TagType = 7379 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7380 CFConstantStringTagDecl = TagType->getDecl(); 7381 } 7382 7383 QualType ASTContext::getBlockDescriptorType() const { 7384 if (BlockDescriptorType) 7385 return getTagDeclType(BlockDescriptorType); 7386 7387 RecordDecl *RD; 7388 // FIXME: Needs the FlagAppleBlock bit. 7389 RD = buildImplicitRecord("__block_descriptor"); 7390 RD->startDefinition(); 7391 7392 QualType FieldTypes[] = { 7393 UnsignedLongTy, 7394 UnsignedLongTy, 7395 }; 7396 7397 static const char *const FieldNames[] = { 7398 "reserved", 7399 "Size" 7400 }; 7401 7402 for (size_t i = 0; i < 2; ++i) { 7403 FieldDecl *Field = FieldDecl::Create( 7404 *this, RD, SourceLocation(), SourceLocation(), 7405 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7406 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7407 Field->setAccess(AS_public); 7408 RD->addDecl(Field); 7409 } 7410 7411 RD->completeDefinition(); 7412 7413 BlockDescriptorType = RD; 7414 7415 return getTagDeclType(BlockDescriptorType); 7416 } 7417 7418 QualType ASTContext::getBlockDescriptorExtendedType() const { 7419 if (BlockDescriptorExtendedType) 7420 return getTagDeclType(BlockDescriptorExtendedType); 7421 7422 RecordDecl *RD; 7423 // FIXME: Needs the FlagAppleBlock bit. 
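  // Sketch of the C-level layout this implicit record models (the copy and
  // dispose entries are represented as 'void **' below):
  //   struct __block_descriptor_withcopydispose {
  //     unsigned long reserved;
  //     unsigned long Size;
  //     void **CopyFuncPtr;
  //     void **DestroyFuncPtr;
  //   };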
7424 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7425 RD->startDefinition(); 7426 7427 QualType FieldTypes[] = { 7428 UnsignedLongTy, 7429 UnsignedLongTy, 7430 getPointerType(VoidPtrTy), 7431 getPointerType(VoidPtrTy) 7432 }; 7433 7434 static const char *const FieldNames[] = { 7435 "reserved", 7436 "Size", 7437 "CopyFuncPtr", 7438 "DestroyFuncPtr" 7439 }; 7440 7441 for (size_t i = 0; i < 4; ++i) { 7442 FieldDecl *Field = FieldDecl::Create( 7443 *this, RD, SourceLocation(), SourceLocation(), 7444 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7445 /*BitWidth=*/nullptr, 7446 /*Mutable=*/false, ICIS_NoInit); 7447 Field->setAccess(AS_public); 7448 RD->addDecl(Field); 7449 } 7450 7451 RD->completeDefinition(); 7452 7453 BlockDescriptorExtendedType = RD; 7454 return getTagDeclType(BlockDescriptorExtendedType); 7455 } 7456 7457 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7458 const auto *BT = dyn_cast<BuiltinType>(T); 7459 7460 if (!BT) { 7461 if (isa<PipeType>(T)) 7462 return OCLTK_Pipe; 7463 7464 return OCLTK_Default; 7465 } 7466 7467 switch (BT->getKind()) { 7468 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7469 case BuiltinType::Id: \ 7470 return OCLTK_Image; 7471 #include "clang/Basic/OpenCLImageTypes.def" 7472 7473 case BuiltinType::OCLClkEvent: 7474 return OCLTK_ClkEvent; 7475 7476 case BuiltinType::OCLEvent: 7477 return OCLTK_Event; 7478 7479 case BuiltinType::OCLQueue: 7480 return OCLTK_Queue; 7481 7482 case BuiltinType::OCLReserveID: 7483 return OCLTK_ReserveID; 7484 7485 case BuiltinType::OCLSampler: 7486 return OCLTK_Sampler; 7487 7488 default: 7489 return OCLTK_Default; 7490 } 7491 } 7492 7493 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7494 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7495 } 7496 7497 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7498 /// requires copy/dispose. Note that this must match the logic 7499 /// in buildByrefHelpers. 7500 bool ASTContext::BlockRequiresCopying(QualType Ty, 7501 const VarDecl *D) { 7502 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7503 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7504 if (!copyExpr && record->hasTrivialDestructor()) return false; 7505 7506 return true; 7507 } 7508 7509 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7510 // move or destroy. 7511 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7512 return true; 7513 7514 if (!Ty->isObjCRetainableType()) return false; 7515 7516 Qualifiers qs = Ty.getQualifiers(); 7517 7518 // If we have lifetime, that dominates. 7519 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7520 switch (lifetime) { 7521 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7522 7523 // These are just bits as far as the runtime is concerned. 7524 case Qualifiers::OCL_ExplicitNone: 7525 case Qualifiers::OCL_Autoreleasing: 7526 return false; 7527 7528 // These cases should have been taken care of when checking the type's 7529 // non-triviality. 
7530 case Qualifiers::OCL_Weak: 7531 case Qualifiers::OCL_Strong: 7532 llvm_unreachable("impossible"); 7533 } 7534 llvm_unreachable("fell out of lifetime switch!"); 7535 } 7536 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7537 Ty->isObjCObjectPointerType()); 7538 } 7539 7540 bool ASTContext::getByrefLifetime(QualType Ty, 7541 Qualifiers::ObjCLifetime &LifeTime, 7542 bool &HasByrefExtendedLayout) const { 7543 if (!getLangOpts().ObjC || 7544 getLangOpts().getGC() != LangOptions::NonGC) 7545 return false; 7546 7547 HasByrefExtendedLayout = false; 7548 if (Ty->isRecordType()) { 7549 HasByrefExtendedLayout = true; 7550 LifeTime = Qualifiers::OCL_None; 7551 } else if ((LifeTime = Ty.getObjCLifetime())) { 7552 // Honor the ARC qualifiers. 7553 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7554 // The MRR rule. 7555 LifeTime = Qualifiers::OCL_ExplicitNone; 7556 } else { 7557 LifeTime = Qualifiers::OCL_None; 7558 } 7559 return true; 7560 } 7561 7562 CanQualType ASTContext::getNSUIntegerType() const { 7563 assert(Target && "Expected target to be initialized"); 7564 const llvm::Triple &T = Target->getTriple(); 7565 // Windows is LLP64 rather than LP64 7566 if (T.isOSWindows() && T.isArch64Bit()) 7567 return UnsignedLongLongTy; 7568 return UnsignedLongTy; 7569 } 7570 7571 CanQualType ASTContext::getNSIntegerType() const { 7572 assert(Target && "Expected target to be initialized"); 7573 const llvm::Triple &T = Target->getTriple(); 7574 // Windows is LLP64 rather than LP64 7575 if (T.isOSWindows() && T.isArch64Bit()) 7576 return LongLongTy; 7577 return LongTy; 7578 } 7579 7580 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7581 if (!ObjCInstanceTypeDecl) 7582 ObjCInstanceTypeDecl = 7583 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7584 return ObjCInstanceTypeDecl; 7585 } 7586 7587 // This returns true if a type has been typedefed to BOOL: 7588 // typedef <type> BOOL; 7589 static bool isTypeTypedefedAsBOOL(QualType T) { 7590 if (const auto *TT = dyn_cast<TypedefType>(T)) 7591 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7592 return II->isStr("BOOL"); 7593 7594 return false; 7595 } 7596 7597 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7598 /// purpose. 7599 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7600 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7601 return CharUnits::Zero(); 7602 7603 CharUnits sz = getTypeSizeInChars(type); 7604 7605 // Make all integer and enum types at least as large as an int 7606 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7607 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7608 // Treat arrays as pointers, since that's how they're passed in. 7609 else if (type->isArrayType()) 7610 sz = getTypeSizeInChars(VoidPtrTy); 7611 return sz; 7612 } 7613 7614 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7615 return getTargetInfo().getCXXABI().isMicrosoft() && 7616 VD->isStaticDataMember() && 7617 VD->getType()->isIntegralOrEnumerationType() && 7618 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7619 } 7620 7621 ASTContext::InlineVariableDefinitionKind 7622 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7623 if (!VD->isInline()) 7624 return InlineVariableDefinitionKind::None; 7625 7626 // In almost all cases, it's a weak definition. 
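  // Illustrative example:
  //   struct A { static inline int x = 0; };    // explicitly inline: weak
  //   struct B { static constexpr int y = 0; };
  //   constexpr int B::y;                       // file-scope redeclaration: strong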
7627 auto *First = VD->getFirstDecl(); 7628 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7629 return InlineVariableDefinitionKind::Weak; 7630 7631 // If there's a file-context declaration in this translation unit, it's a 7632 // non-discardable definition. 7633 for (auto *D : VD->redecls()) 7634 if (D->getLexicalDeclContext()->isFileContext() && 7635 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7636 return InlineVariableDefinitionKind::Strong; 7637 7638 // If we've not seen one yet, we don't know. 7639 return InlineVariableDefinitionKind::WeakUnknown; 7640 } 7641 7642 static std::string charUnitsToString(const CharUnits &CU) { 7643 return llvm::itostr(CU.getQuantity()); 7644 } 7645 7646 /// getObjCEncodingForBlock - Return the encoded type for this block 7647 /// declaration. 7648 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7649 std::string S; 7650 7651 const BlockDecl *Decl = Expr->getBlockDecl(); 7652 QualType BlockTy = 7653 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7654 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7655 // Encode result type. 7656 if (getLangOpts().EncodeExtendedBlockSig) 7657 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7658 true /*Extended*/); 7659 else 7660 getObjCEncodingForType(BlockReturnTy, S); 7661 // Compute size of all parameters. 7662 // Start with computing size of a pointer in number of bytes. 7663 // FIXME: There might(should) be a better way of doing this computation! 7664 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7665 CharUnits ParmOffset = PtrSize; 7666 for (auto *PI : Decl->parameters()) { 7667 QualType PType = PI->getType(); 7668 CharUnits sz = getObjCEncodingTypeSize(PType); 7669 if (sz.isZero()) 7670 continue; 7671 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7672 ParmOffset += sz; 7673 } 7674 // Size of the argument frame 7675 S += charUnitsToString(ParmOffset); 7676 // Block pointer and offset. 7677 S += "@?0"; 7678 7679 // Argument types. 7680 ParmOffset = PtrSize; 7681 for (auto *PVDecl : Decl->parameters()) { 7682 QualType PType = PVDecl->getOriginalType(); 7683 if (const auto *AT = 7684 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7685 // Use array's original type only if it has known number of 7686 // elements. 7687 if (!isa<ConstantArrayType>(AT)) 7688 PType = PVDecl->getType(); 7689 } else if (PType->isFunctionType()) 7690 PType = PVDecl->getType(); 7691 if (getLangOpts().EncodeExtendedBlockSig) 7692 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7693 S, true /*Extended*/); 7694 else 7695 getObjCEncodingForType(PType, S); 7696 S += charUnitsToString(ParmOffset); 7697 ParmOffset += getObjCEncodingTypeSize(PType); 7698 } 7699 7700 return S; 7701 } 7702 7703 std::string 7704 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7705 std::string S; 7706 // Encode result type. 7707 getObjCEncodingForType(Decl->getReturnType(), S); 7708 CharUnits ParmOffset; 7709 // Compute size of all parameters. 7710 for (auto *PI : Decl->parameters()) { 7711 QualType PType = PI->getType(); 7712 CharUnits sz = getObjCEncodingTypeSize(PType); 7713 if (sz.isZero()) 7714 continue; 7715 7716 assert(sz.isPositive() && 7717 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7718 ParmOffset += sz; 7719 } 7720 S += charUnitsToString(ParmOffset); 7721 ParmOffset = CharUnits::Zero(); 7722 7723 // Argument types. 
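  // For illustration: on a typical 64-bit target (4-byte int, 8-byte
  // pointers), a function such as `int f(int, char *)` would come out
  // roughly as "i12i0*4" -- the return type, the total parameter size,
  // then each parameter's type followed by its byte offset.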
7724 for (auto *PVDecl : Decl->parameters()) { 7725 QualType PType = PVDecl->getOriginalType(); 7726 if (const auto *AT = 7727 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7728 // Use array's original type only if it has known number of 7729 // elements. 7730 if (!isa<ConstantArrayType>(AT)) 7731 PType = PVDecl->getType(); 7732 } else if (PType->isFunctionType()) 7733 PType = PVDecl->getType(); 7734 getObjCEncodingForType(PType, S); 7735 S += charUnitsToString(ParmOffset); 7736 ParmOffset += getObjCEncodingTypeSize(PType); 7737 } 7738 7739 return S; 7740 } 7741 7742 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7743 /// method parameter or return type. If Extended, include class names and 7744 /// block object types. 7745 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7746 QualType T, std::string& S, 7747 bool Extended) const { 7748 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7749 getObjCEncodingForTypeQualifier(QT, S); 7750 // Encode parameter type. 7751 ObjCEncOptions Options = ObjCEncOptions() 7752 .setExpandPointedToStructures() 7753 .setExpandStructures() 7754 .setIsOutermostType(); 7755 if (Extended) 7756 Options.setEncodeBlockParameters().setEncodeClassNames(); 7757 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7758 } 7759 7760 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7761 /// declaration. 7762 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7763 bool Extended) const { 7764 // FIXME: This is not very efficient. 7765 // Encode return type. 7766 std::string S; 7767 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7768 Decl->getReturnType(), S, Extended); 7769 // Compute size of all parameters. 7770 // Start with computing size of a pointer in number of bytes. 7771 // FIXME: There might(should) be a better way of doing this computation! 7772 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7773 // The first two arguments (self and _cmd) are pointers; account for 7774 // their size. 7775 CharUnits ParmOffset = 2 * PtrSize; 7776 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7777 E = Decl->sel_param_end(); PI != E; ++PI) { 7778 QualType PType = (*PI)->getType(); 7779 CharUnits sz = getObjCEncodingTypeSize(PType); 7780 if (sz.isZero()) 7781 continue; 7782 7783 assert(sz.isPositive() && 7784 "getObjCEncodingForMethodDecl - Incomplete param type"); 7785 ParmOffset += sz; 7786 } 7787 S += charUnitsToString(ParmOffset); 7788 S += "@0:"; 7789 S += charUnitsToString(PtrSize); 7790 7791 // Argument types. 7792 ParmOffset = 2 * PtrSize; 7793 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7794 E = Decl->sel_param_end(); PI != E; ++PI) { 7795 const ParmVarDecl *PVDecl = *PI; 7796 QualType PType = PVDecl->getOriginalType(); 7797 if (const auto *AT = 7798 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7799 // Use array's original type only if it has known number of 7800 // elements. 
7801 if (!isa<ConstantArrayType>(AT)) 7802 PType = PVDecl->getType(); 7803 } else if (PType->isFunctionType()) 7804 PType = PVDecl->getType(); 7805 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7806 PType, S, Extended); 7807 S += charUnitsToString(ParmOffset); 7808 ParmOffset += getObjCEncodingTypeSize(PType); 7809 } 7810 7811 return S; 7812 } 7813 7814 ObjCPropertyImplDecl * 7815 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7816 const ObjCPropertyDecl *PD, 7817 const Decl *Container) const { 7818 if (!Container) 7819 return nullptr; 7820 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7821 for (auto *PID : CID->property_impls()) 7822 if (PID->getPropertyDecl() == PD) 7823 return PID; 7824 } else { 7825 const auto *OID = cast<ObjCImplementationDecl>(Container); 7826 for (auto *PID : OID->property_impls()) 7827 if (PID->getPropertyDecl() == PD) 7828 return PID; 7829 } 7830 return nullptr; 7831 } 7832 7833 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7834 /// property declaration. If non-NULL, Container must be either an 7835 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7836 /// NULL when getting encodings for protocol properties. 7837 /// Property attributes are stored as a comma-delimited C string. The simple 7838 /// attributes readonly and bycopy are encoded as single characters. The 7839 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7840 /// encoded as single characters, followed by an identifier. Property types 7841 /// are also encoded as a parametrized attribute. The characters used to encode 7842 /// these attributes are defined by the following enumeration: 7843 /// @code 7844 /// enum PropertyAttributes { 7845 /// kPropertyReadOnly = 'R', // property is read-only. 7846 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7847 /// kPropertyByref = '&', // property is a reference to the value last assigned 7848 /// kPropertyDynamic = 'D', // property is dynamic 7849 /// kPropertyGetter = 'G', // followed by getter selector name 7850 /// kPropertySetter = 'S', // followed by setter selector name 7851 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7852 /// kPropertyType = 'T' // followed by old-style type encoding. 7853 /// kPropertyWeak = 'W' // 'weak' property 7854 /// kPropertyStrong = 'P' // property GC'able 7855 /// kPropertyNonAtomic = 'N' // property non-atomic 7856 /// kPropertyOptional = '?' // property optional 7857 /// }; 7858 /// @endcode 7859 std::string 7860 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7861 const Decl *Container) const { 7862 // Collect information from the property implementation decl(s). 7863 bool Dynamic = false; 7864 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7865 7866 if (ObjCPropertyImplDecl *PropertyImpDecl = 7867 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7868 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7869 Dynamic = true; 7870 else 7871 SynthesizePID = PropertyImpDecl; 7872 } 7873 7874 // FIXME: This is not very efficient. 7875 std::string S = "T"; 7876 7877 // Encode result type. 7878 // GCC has some special rules regarding encoding of properties which 7879 // closely resembles encoding of ivars. 
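  // For illustration, a property such as
  //   @property (nonatomic, copy) NSString *name;   // synthesized ivar _name
  // would typically encode as
  //   T@"NSString",C,N,V_name
  // i.e. the type attribute, the copy/nonatomic attributes, and the name of
  // the backing instance variable.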
7880 getObjCEncodingForPropertyType(PD->getType(), S); 7881 7882 if (PD->isOptional()) 7883 S += ",?"; 7884 7885 if (PD->isReadOnly()) { 7886 S += ",R"; 7887 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7888 S += ",C"; 7889 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7890 S += ",&"; 7891 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7892 S += ",W"; 7893 } else { 7894 switch (PD->getSetterKind()) { 7895 case ObjCPropertyDecl::Assign: break; 7896 case ObjCPropertyDecl::Copy: S += ",C"; break; 7897 case ObjCPropertyDecl::Retain: S += ",&"; break; 7898 case ObjCPropertyDecl::Weak: S += ",W"; break; 7899 } 7900 } 7901 7902 // It really isn't clear at all what this means, since properties 7903 // are "dynamic by default". 7904 if (Dynamic) 7905 S += ",D"; 7906 7907 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7908 S += ",N"; 7909 7910 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7911 S += ",G"; 7912 S += PD->getGetterName().getAsString(); 7913 } 7914 7915 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7916 S += ",S"; 7917 S += PD->getSetterName().getAsString(); 7918 } 7919 7920 if (SynthesizePID) { 7921 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7922 S += ",V"; 7923 S += OID->getNameAsString(); 7924 } 7925 7926 // FIXME: OBJCGC: weak & strong 7927 return S; 7928 } 7929 7930 /// getLegacyIntegralTypeEncoding - 7931 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7932 /// 'l' or 'L' , but not always. For typedefs, we need to use 7933 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7934 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7935 if (PointeeTy->getAs<TypedefType>()) { 7936 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7937 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7938 PointeeTy = UnsignedIntTy; 7939 else 7940 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7941 PointeeTy = IntTy; 7942 } 7943 } 7944 } 7945 7946 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7947 const FieldDecl *Field, 7948 QualType *NotEncodedT) const { 7949 // We follow the behavior of gcc, expanding structures which are 7950 // directly pointed to, and expanding embedded structures. Note that 7951 // these rules are sufficient to prevent recursive encoding of the 7952 // same type. 7953 getObjCEncodingForTypeImpl(T, S, 7954 ObjCEncOptions() 7955 .setExpandPointedToStructures() 7956 .setExpandStructures() 7957 .setIsOutermostType(), 7958 Field, NotEncodedT); 7959 } 7960 7961 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7962 std::string& S) const { 7963 // Encode result type. 7964 // GCC has some special rules regarding encoding of properties which 7965 // closely resembles encoding of ivars. 
7966 getObjCEncodingForTypeImpl(T, S, 7967 ObjCEncOptions() 7968 .setExpandPointedToStructures() 7969 .setExpandStructures() 7970 .setIsOutermostType() 7971 .setEncodingProperty(), 7972 /*Field=*/nullptr); 7973 } 7974 7975 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7976 const BuiltinType *BT) { 7977 BuiltinType::Kind kind = BT->getKind(); 7978 switch (kind) { 7979 case BuiltinType::Void: return 'v'; 7980 case BuiltinType::Bool: return 'B'; 7981 case BuiltinType::Char8: 7982 case BuiltinType::Char_U: 7983 case BuiltinType::UChar: return 'C'; 7984 case BuiltinType::Char16: 7985 case BuiltinType::UShort: return 'S'; 7986 case BuiltinType::Char32: 7987 case BuiltinType::UInt: return 'I'; 7988 case BuiltinType::ULong: 7989 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7990 case BuiltinType::UInt128: return 'T'; 7991 case BuiltinType::ULongLong: return 'Q'; 7992 case BuiltinType::Char_S: 7993 case BuiltinType::SChar: return 'c'; 7994 case BuiltinType::Short: return 's'; 7995 case BuiltinType::WChar_S: 7996 case BuiltinType::WChar_U: 7997 case BuiltinType::Int: return 'i'; 7998 case BuiltinType::Long: 7999 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 8000 case BuiltinType::LongLong: return 'q'; 8001 case BuiltinType::Int128: return 't'; 8002 case BuiltinType::Float: return 'f'; 8003 case BuiltinType::Double: return 'd'; 8004 case BuiltinType::LongDouble: return 'D'; 8005 case BuiltinType::NullPtr: return '*'; // like char* 8006 8007 case BuiltinType::BFloat16: 8008 case BuiltinType::Float16: 8009 case BuiltinType::Float128: 8010 case BuiltinType::Ibm128: 8011 case BuiltinType::Half: 8012 case BuiltinType::ShortAccum: 8013 case BuiltinType::Accum: 8014 case BuiltinType::LongAccum: 8015 case BuiltinType::UShortAccum: 8016 case BuiltinType::UAccum: 8017 case BuiltinType::ULongAccum: 8018 case BuiltinType::ShortFract: 8019 case BuiltinType::Fract: 8020 case BuiltinType::LongFract: 8021 case BuiltinType::UShortFract: 8022 case BuiltinType::UFract: 8023 case BuiltinType::ULongFract: 8024 case BuiltinType::SatShortAccum: 8025 case BuiltinType::SatAccum: 8026 case BuiltinType::SatLongAccum: 8027 case BuiltinType::SatUShortAccum: 8028 case BuiltinType::SatUAccum: 8029 case BuiltinType::SatULongAccum: 8030 case BuiltinType::SatShortFract: 8031 case BuiltinType::SatFract: 8032 case BuiltinType::SatLongFract: 8033 case BuiltinType::SatUShortFract: 8034 case BuiltinType::SatUFract: 8035 case BuiltinType::SatULongFract: 8036 // FIXME: potentially need @encodes for these! 8037 return ' '; 8038 8039 #define SVE_TYPE(Name, Id, SingletonId) \ 8040 case BuiltinType::Id: 8041 #include "clang/Basic/AArch64SVEACLETypes.def" 8042 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8043 #include "clang/Basic/RISCVVTypes.def" 8044 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8045 #include "clang/Basic/WebAssemblyReferenceTypes.def" 8046 { 8047 DiagnosticsEngine &Diags = C->getDiagnostics(); 8048 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 8049 "cannot yet @encode type %0"); 8050 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 8051 return ' '; 8052 } 8053 8054 case BuiltinType::ObjCId: 8055 case BuiltinType::ObjCClass: 8056 case BuiltinType::ObjCSel: 8057 llvm_unreachable("@encoding ObjC primitive type"); 8058 8059 // OpenCL and placeholder types don't need @encodings. 
8060 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 8061 case BuiltinType::Id: 8062 #include "clang/Basic/OpenCLImageTypes.def" 8063 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 8064 case BuiltinType::Id: 8065 #include "clang/Basic/OpenCLExtensionTypes.def" 8066 case BuiltinType::OCLEvent: 8067 case BuiltinType::OCLClkEvent: 8068 case BuiltinType::OCLQueue: 8069 case BuiltinType::OCLReserveID: 8070 case BuiltinType::OCLSampler: 8071 case BuiltinType::Dependent: 8072 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 8073 case BuiltinType::Id: 8074 #include "clang/Basic/PPCTypes.def" 8075 #define BUILTIN_TYPE(KIND, ID) 8076 #define PLACEHOLDER_TYPE(KIND, ID) \ 8077 case BuiltinType::KIND: 8078 #include "clang/AST/BuiltinTypes.def" 8079 llvm_unreachable("invalid builtin type for @encode"); 8080 } 8081 llvm_unreachable("invalid BuiltinType::Kind value"); 8082 } 8083 8084 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 8085 EnumDecl *Enum = ET->getDecl(); 8086 8087 // The encoding of an non-fixed enum type is always 'i', regardless of size. 8088 if (!Enum->isFixed()) 8089 return 'i'; 8090 8091 // The encoding of a fixed enum type matches its fixed underlying type. 8092 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 8093 return getObjCEncodingForPrimitiveType(C, BT); 8094 } 8095 8096 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 8097 QualType T, const FieldDecl *FD) { 8098 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 8099 S += 'b'; 8100 // The NeXT runtime encodes bit fields as b followed by the number of bits. 8101 // The GNU runtime requires more information; bitfields are encoded as b, 8102 // then the offset (in bits) of the first element, then the type of the 8103 // bitfield, then the size in bits. For example, in this structure: 8104 // 8105 // struct 8106 // { 8107 // int integer; 8108 // int flags:2; 8109 // }; 8110 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 8111 // runtime, but b32i2 for the GNU runtime. The reason for this extra 8112 // information is not especially sensible, but we're stuck with it for 8113 // compatibility with GCC, although providing it breaks anything that 8114 // actually uses runtime introspection and wants to work on both runtimes... 8115 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 8116 uint64_t Offset; 8117 8118 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8119 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8120 IVD); 8121 } else { 8122 const RecordDecl *RD = FD->getParent(); 8123 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8124 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8125 } 8126 8127 S += llvm::utostr(Offset); 8128 8129 if (const auto *ET = T->getAs<EnumType>()) 8130 S += ObjCEncodingForEnumType(Ctx, ET); 8131 else { 8132 const auto *BT = T->castAs<BuiltinType>(); 8133 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8134 } 8135 } 8136 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8137 } 8138 8139 // Helper function for determining whether the encoded type string would include 8140 // a template specialization type. 
8141 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8142 bool VisitBasesAndFields) { 8143 T = T->getBaseElementTypeUnsafe(); 8144 8145 if (auto *PT = T->getAs<PointerType>()) 8146 return hasTemplateSpecializationInEncodedString( 8147 PT->getPointeeType().getTypePtr(), false); 8148 8149 auto *CXXRD = T->getAsCXXRecordDecl(); 8150 8151 if (!CXXRD) 8152 return false; 8153 8154 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8155 return true; 8156 8157 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8158 return false; 8159 8160 for (const auto &B : CXXRD->bases()) 8161 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8162 true)) 8163 return true; 8164 8165 for (auto *FD : CXXRD->fields()) 8166 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8167 true)) 8168 return true; 8169 8170 return false; 8171 } 8172 8173 // FIXME: Use SmallString for accumulating string. 8174 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8175 const ObjCEncOptions Options, 8176 const FieldDecl *FD, 8177 QualType *NotEncodedT) const { 8178 CanQualType CT = getCanonicalType(T); 8179 switch (CT->getTypeClass()) { 8180 case Type::Builtin: 8181 case Type::Enum: 8182 if (FD && FD->isBitField()) 8183 return EncodeBitField(this, S, T, FD); 8184 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8185 S += getObjCEncodingForPrimitiveType(this, BT); 8186 else 8187 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8188 return; 8189 8190 case Type::Complex: 8191 S += 'j'; 8192 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8193 ObjCEncOptions(), 8194 /*Field=*/nullptr); 8195 return; 8196 8197 case Type::Atomic: 8198 S += 'A'; 8199 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8200 ObjCEncOptions(), 8201 /*Field=*/nullptr); 8202 return; 8203 8204 // encoding for pointer or reference types. 8205 case Type::Pointer: 8206 case Type::LValueReference: 8207 case Type::RValueReference: { 8208 QualType PointeeTy; 8209 if (isa<PointerType>(CT)) { 8210 const auto *PT = T->castAs<PointerType>(); 8211 if (PT->isObjCSelType()) { 8212 S += ':'; 8213 return; 8214 } 8215 PointeeTy = PT->getPointeeType(); 8216 } else { 8217 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8218 } 8219 8220 bool isReadOnly = false; 8221 // For historical/compatibility reasons, the read-only qualifier of the 8222 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8223 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8224 // Also, do not emit the 'r' for anything but the outermost type! 8225 if (T->getAs<TypedefType>()) { 8226 if (Options.IsOutermostType() && T.isConstQualified()) { 8227 isReadOnly = true; 8228 S += 'r'; 8229 } 8230 } else if (Options.IsOutermostType()) { 8231 QualType P = PointeeTy; 8232 while (auto PT = P->getAs<PointerType>()) 8233 P = PT->getPointeeType(); 8234 if (P.isConstQualified()) { 8235 isReadOnly = true; 8236 S += 'r'; 8237 } 8238 } 8239 if (isReadOnly) { 8240 // Another legacy compatibility encoding. Some ObjC qualifier and type 8241 // combinations need to be rearranged. 8242 // Rewrite "in const" from "nr" to "rn" 8243 if (StringRef(S).ends_with("nr")) 8244 S.replace(S.end()-2, S.end(), "rn"); 8245 } 8246 8247 if (PointeeTy->isCharType()) { 8248 // char pointer types should be encoded as '*' unless it is a 8249 // type that has been typedef'd to 'BOOL'. 
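      // For example, @encode(char *) yields "*", whereas a pointer to a
      // BOOL typedef (signed char on most targets) falls through to the
      // generic pointer path below and comes out as "^c".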
8250 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8251 S += '*'; 8252 return; 8253 } 8254 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8255 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8256 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8257 S += '#'; 8258 return; 8259 } 8260 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8261 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8262 S += '@'; 8263 return; 8264 } 8265 // If the encoded string for the class includes template names, just emit 8266 // "^v" for pointers to the class. 8267 if (getLangOpts().CPlusPlus && 8268 (!getLangOpts().EncodeCXXClassTemplateSpec && 8269 hasTemplateSpecializationInEncodedString( 8270 RTy, Options.ExpandPointedToStructures()))) { 8271 S += "^v"; 8272 return; 8273 } 8274 // fall through... 8275 } 8276 S += '^'; 8277 getLegacyIntegralTypeEncoding(PointeeTy); 8278 8279 ObjCEncOptions NewOptions; 8280 if (Options.ExpandPointedToStructures()) 8281 NewOptions.setExpandStructures(); 8282 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8283 /*Field=*/nullptr, NotEncodedT); 8284 return; 8285 } 8286 8287 case Type::ConstantArray: 8288 case Type::IncompleteArray: 8289 case Type::VariableArray: { 8290 const auto *AT = cast<ArrayType>(CT); 8291 8292 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8293 // Incomplete arrays are encoded as a pointer to the array element. 8294 S += '^'; 8295 8296 getObjCEncodingForTypeImpl( 8297 AT->getElementType(), S, 8298 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8299 } else { 8300 S += '['; 8301 8302 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8303 S += llvm::utostr(CAT->getSize().getZExtValue()); 8304 else { 8305 //Variable length arrays are encoded as a regular array with 0 elements. 8306 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8307 "Unknown array type!"); 8308 S += '0'; 8309 } 8310 8311 getObjCEncodingForTypeImpl( 8312 AT->getElementType(), S, 8313 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8314 NotEncodedT); 8315 S += ']'; 8316 } 8317 return; 8318 } 8319 8320 case Type::FunctionNoProto: 8321 case Type::FunctionProto: 8322 S += '?'; 8323 return; 8324 8325 case Type::Record: { 8326 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8327 S += RDecl->isUnion() ? '(' : '{'; 8328 // Anonymous structures print as '?' 8329 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8330 S += II->getName(); 8331 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8332 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8333 llvm::raw_string_ostream OS(S); 8334 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8335 getPrintingPolicy()); 8336 } 8337 } else { 8338 S += '?'; 8339 } 8340 if (Options.ExpandStructures()) { 8341 S += '='; 8342 if (!RDecl->isUnion()) { 8343 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8344 } else { 8345 for (const auto *Field : RDecl->fields()) { 8346 if (FD) { 8347 S += '"'; 8348 S += Field->getNameAsString(); 8349 S += '"'; 8350 } 8351 8352 // Special case bit-fields. 
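          // Passing the field down lets EncodeBitField emit the GNU-style
          // offset/type prefix on GNU-family runtimes; the NeXT runtime
          // just gets 'b' followed by the bit width.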
8353 if (Field->isBitField()) { 8354 getObjCEncodingForTypeImpl(Field->getType(), S, 8355 ObjCEncOptions().setExpandStructures(), 8356 Field); 8357 } else { 8358 QualType qt = Field->getType(); 8359 getLegacyIntegralTypeEncoding(qt); 8360 getObjCEncodingForTypeImpl( 8361 qt, S, 8362 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8363 NotEncodedT); 8364 } 8365 } 8366 } 8367 } 8368 S += RDecl->isUnion() ? ')' : '}'; 8369 return; 8370 } 8371 8372 case Type::BlockPointer: { 8373 const auto *BT = T->castAs<BlockPointerType>(); 8374 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8375 if (Options.EncodeBlockParameters()) { 8376 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8377 8378 S += '<'; 8379 // Block return type 8380 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8381 Options.forComponentType(), FD, NotEncodedT); 8382 // Block self 8383 S += "@?"; 8384 // Block parameters 8385 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8386 for (const auto &I : FPT->param_types()) 8387 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8388 NotEncodedT); 8389 } 8390 S += '>'; 8391 } 8392 return; 8393 } 8394 8395 case Type::ObjCObject: { 8396 // hack to match legacy encoding of *id and *Class 8397 QualType Ty = getObjCObjectPointerType(CT); 8398 if (Ty->isObjCIdType()) { 8399 S += "{objc_object=}"; 8400 return; 8401 } 8402 else if (Ty->isObjCClassType()) { 8403 S += "{objc_class=}"; 8404 return; 8405 } 8406 // TODO: Double check to make sure this intentionally falls through. 8407 [[fallthrough]]; 8408 } 8409 8410 case Type::ObjCInterface: { 8411 // Ignore protocol qualifiers when mangling at this level. 8412 // @encode(class_name) 8413 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8414 S += '{'; 8415 S += OI->getObjCRuntimeNameAsString(); 8416 if (Options.ExpandStructures()) { 8417 S += '='; 8418 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8419 DeepCollectObjCIvars(OI, true, Ivars); 8420 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8421 const FieldDecl *Field = Ivars[i]; 8422 if (Field->isBitField()) 8423 getObjCEncodingForTypeImpl(Field->getType(), S, 8424 ObjCEncOptions().setExpandStructures(), 8425 Field); 8426 else 8427 getObjCEncodingForTypeImpl(Field->getType(), S, 8428 ObjCEncOptions().setExpandStructures(), FD, 8429 NotEncodedT); 8430 } 8431 } 8432 S += '}'; 8433 return; 8434 } 8435 8436 case Type::ObjCObjectPointer: { 8437 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8438 if (OPT->isObjCIdType()) { 8439 S += '@'; 8440 return; 8441 } 8442 8443 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8444 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8445 // Since this is a binary compatibility issue, need to consult with 8446 // runtime folks. Fortunately, this is a *very* obscure construct. 8447 S += '#'; 8448 return; 8449 } 8450 8451 if (OPT->isObjCQualifiedIdType()) { 8452 getObjCEncodingForTypeImpl( 8453 getObjCIdType(), S, 8454 Options.keepingOnly(ObjCEncOptions() 8455 .setExpandPointedToStructures() 8456 .setExpandStructures()), 8457 FD); 8458 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8459 // Note that we do extended encoding of protocol qualifier list 8460 // Only when doing ivar or property encoding. 
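      // For example, an ivar declared as id<NSCopying> would be encoded as
      //   @"<NSCopying>"
      // whereas in other contexts it degrades to a plain '@'.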
8461 S += '"'; 8462 for (const auto *I : OPT->quals()) { 8463 S += '<'; 8464 S += I->getObjCRuntimeNameAsString(); 8465 S += '>'; 8466 } 8467 S += '"'; 8468 } 8469 return; 8470 } 8471 8472 S += '@'; 8473 if (OPT->getInterfaceDecl() && 8474 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8475 S += '"'; 8476 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8477 for (const auto *I : OPT->quals()) { 8478 S += '<'; 8479 S += I->getObjCRuntimeNameAsString(); 8480 S += '>'; 8481 } 8482 S += '"'; 8483 } 8484 return; 8485 } 8486 8487 // gcc just blithely ignores member pointers. 8488 // FIXME: we should do better than that. 'M' is available. 8489 case Type::MemberPointer: 8490 // This matches gcc's encoding, even though technically it is insufficient. 8491 //FIXME. We should do a better job than gcc. 8492 case Type::Vector: 8493 case Type::ExtVector: 8494 // Until we have a coherent encoding of these three types, issue warning. 8495 if (NotEncodedT) 8496 *NotEncodedT = T; 8497 return; 8498 8499 case Type::ConstantMatrix: 8500 if (NotEncodedT) 8501 *NotEncodedT = T; 8502 return; 8503 8504 case Type::BitInt: 8505 if (NotEncodedT) 8506 *NotEncodedT = T; 8507 return; 8508 8509 // We could see an undeduced auto type here during error recovery. 8510 // Just ignore it. 8511 case Type::Auto: 8512 case Type::DeducedTemplateSpecialization: 8513 return; 8514 8515 case Type::Pipe: 8516 #define ABSTRACT_TYPE(KIND, BASE) 8517 #define TYPE(KIND, BASE) 8518 #define DEPENDENT_TYPE(KIND, BASE) \ 8519 case Type::KIND: 8520 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8521 case Type::KIND: 8522 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8523 case Type::KIND: 8524 #include "clang/AST/TypeNodes.inc" 8525 llvm_unreachable("@encode for dependent type!"); 8526 } 8527 llvm_unreachable("bad type kind!"); 8528 } 8529 8530 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8531 std::string &S, 8532 const FieldDecl *FD, 8533 bool includeVBases, 8534 QualType *NotEncodedT) const { 8535 assert(RDecl && "Expected non-null RecordDecl"); 8536 assert(!RDecl->isUnion() && "Should not be called for unions"); 8537 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8538 return; 8539 8540 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8541 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8542 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8543 8544 if (CXXRec) { 8545 for (const auto &BI : CXXRec->bases()) { 8546 if (!BI.isVirtual()) { 8547 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8548 if (base->isEmpty()) 8549 continue; 8550 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8551 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8552 std::make_pair(offs, base)); 8553 } 8554 } 8555 } 8556 8557 for (FieldDecl *Field : RDecl->fields()) { 8558 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8559 continue; 8560 uint64_t offs = layout.getFieldOffset(Field->getFieldIndex()); 8561 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8562 std::make_pair(offs, Field)); 8563 } 8564 8565 if (CXXRec && includeVBases) { 8566 for (const auto &BI : CXXRec->vbases()) { 8567 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8568 if (base->isEmpty()) 8569 continue; 8570 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8571 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8572 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8573 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
8574 std::make_pair(offs, base));
8575 }
8576 }
8577
8578 CharUnits size;
8579 if (CXXRec) {
8580 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
8581 } else {
8582 size = layout.getSize();
8583 }
8584
8585 #ifndef NDEBUG
8586 uint64_t CurOffs = 0;
8587 #endif
8588 std::multimap<uint64_t, NamedDecl *>::iterator
8589 CurLayObj = FieldOrBaseOffsets.begin();
8590
8591 if (CXXRec && CXXRec->isDynamicClass() &&
8592 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
8593 if (FD) {
8594 S += "\"_vptr$";
8595 std::string recname = CXXRec->getNameAsString();
8596 if (recname.empty()) recname = "?";
8597 S += recname;
8598 S += '"';
8599 }
8600 S += "^^?";
8601 #ifndef NDEBUG
8602 CurOffs += getTypeSize(VoidPtrTy);
8603 #endif
8604 }
8605
8606 if (!RDecl->hasFlexibleArrayMember()) {
8607 // Mark the end of the structure.
8608 uint64_t offs = toBits(size);
8609 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
8610 std::make_pair(offs, nullptr));
8611 }
8612
8613 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
8614 #ifndef NDEBUG
8615 assert(CurOffs <= CurLayObj->first);
8616 if (CurOffs < CurLayObj->first) {
8617 uint64_t padding = CurLayObj->first - CurOffs;
8618 // FIXME: There doesn't seem to be a way to indicate in the encoding that
8619 // packing/alignment of members is different than normal, in which case
8620 // the encoding will be out-of-sync with the real layout.
8621 // If the runtime switches to just consider the size of types without
8622 // taking into account alignment, we could make padding explicit in the
8623 // encoding (e.g. using arrays of chars). The encoding strings would be
8624 // longer then, though.
8625 CurOffs += padding;
8626 }
8627 #endif
8628
8629 NamedDecl *dcl = CurLayObj->second;
8630 if (!dcl)
8631 break; // reached end of structure.
8632
8633 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
8634 // We expand the bases without their virtual bases since those are going
8635 // in the initial structure. Note that this differs from gcc, which
8636 // expands virtual bases each time one is encountered in the hierarchy,
8637 // making the encoding type bigger than it really is.
8638 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8639 NotEncodedT); 8640 assert(!base->isEmpty()); 8641 #ifndef NDEBUG 8642 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8643 #endif 8644 } else { 8645 const auto *field = cast<FieldDecl>(dcl); 8646 if (FD) { 8647 S += '"'; 8648 S += field->getNameAsString(); 8649 S += '"'; 8650 } 8651 8652 if (field->isBitField()) { 8653 EncodeBitField(this, S, field->getType(), field); 8654 #ifndef NDEBUG 8655 CurOffs += field->getBitWidthValue(*this); 8656 #endif 8657 } else { 8658 QualType qt = field->getType(); 8659 getLegacyIntegralTypeEncoding(qt); 8660 getObjCEncodingForTypeImpl( 8661 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8662 FD, NotEncodedT); 8663 #ifndef NDEBUG 8664 CurOffs += getTypeSize(field->getType()); 8665 #endif 8666 } 8667 } 8668 } 8669 } 8670 8671 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8672 std::string& S) const { 8673 if (QT & Decl::OBJC_TQ_In) 8674 S += 'n'; 8675 if (QT & Decl::OBJC_TQ_Inout) 8676 S += 'N'; 8677 if (QT & Decl::OBJC_TQ_Out) 8678 S += 'o'; 8679 if (QT & Decl::OBJC_TQ_Bycopy) 8680 S += 'O'; 8681 if (QT & Decl::OBJC_TQ_Byref) 8682 S += 'R'; 8683 if (QT & Decl::OBJC_TQ_Oneway) 8684 S += 'V'; 8685 } 8686 8687 TypedefDecl *ASTContext::getObjCIdDecl() const { 8688 if (!ObjCIdDecl) { 8689 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8690 T = getObjCObjectPointerType(T); 8691 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8692 } 8693 return ObjCIdDecl; 8694 } 8695 8696 TypedefDecl *ASTContext::getObjCSelDecl() const { 8697 if (!ObjCSelDecl) { 8698 QualType T = getPointerType(ObjCBuiltinSelTy); 8699 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8700 } 8701 return ObjCSelDecl; 8702 } 8703 8704 TypedefDecl *ASTContext::getObjCClassDecl() const { 8705 if (!ObjCClassDecl) { 8706 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8707 T = getObjCObjectPointerType(T); 8708 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8709 } 8710 return ObjCClassDecl; 8711 } 8712 8713 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8714 if (!ObjCProtocolClassDecl) { 8715 ObjCProtocolClassDecl 8716 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8717 SourceLocation(), 8718 &Idents.get("Protocol"), 8719 /*typeParamList=*/nullptr, 8720 /*PrevDecl=*/nullptr, 8721 SourceLocation(), true); 8722 } 8723 8724 return ObjCProtocolClassDecl; 8725 } 8726 8727 //===----------------------------------------------------------------------===// 8728 // __builtin_va_list Construction Functions 8729 //===----------------------------------------------------------------------===// 8730 8731 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8732 StringRef Name) { 8733 // typedef char* __builtin[_ms]_va_list; 8734 QualType T = Context->getPointerType(Context->CharTy); 8735 return Context->buildImplicitTypedef(T, Name); 8736 } 8737 8738 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8739 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8740 } 8741 8742 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8743 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8744 } 8745 8746 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8747 // typedef void* __builtin_va_list; 8748 QualType T = Context->getPointerType(Context->VoidTy); 8749 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8750 } 8751 8752 static TypedefDecl * 8753 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8754 // struct __va_list 8755 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8756 if (Context->getLangOpts().CPlusPlus) { 8757 // namespace std { struct __va_list { 8758 auto *NS = NamespaceDecl::Create( 8759 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8760 /*Inline=*/false, SourceLocation(), SourceLocation(), 8761 &Context->Idents.get("std"), 8762 /*PrevDecl=*/nullptr, /*Nested=*/false); 8763 NS->setImplicit(); 8764 VaListTagDecl->setDeclContext(NS); 8765 } 8766 8767 VaListTagDecl->startDefinition(); 8768 8769 const size_t NumFields = 5; 8770 QualType FieldTypes[NumFields]; 8771 const char *FieldNames[NumFields]; 8772 8773 // void *__stack; 8774 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8775 FieldNames[0] = "__stack"; 8776 8777 // void *__gr_top; 8778 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8779 FieldNames[1] = "__gr_top"; 8780 8781 // void *__vr_top; 8782 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8783 FieldNames[2] = "__vr_top"; 8784 8785 // int __gr_offs; 8786 FieldTypes[3] = Context->IntTy; 8787 FieldNames[3] = "__gr_offs"; 8788 8789 // int __vr_offs; 8790 FieldTypes[4] = Context->IntTy; 8791 FieldNames[4] = "__vr_offs"; 8792 8793 // Create fields 8794 for (unsigned i = 0; i < NumFields; ++i) { 8795 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8796 VaListTagDecl, 8797 SourceLocation(), 8798 SourceLocation(), 8799 &Context->Idents.get(FieldNames[i]), 8800 FieldTypes[i], /*TInfo=*/nullptr, 8801 /*BitWidth=*/nullptr, 8802 /*Mutable=*/false, 8803 ICIS_NoInit); 8804 Field->setAccess(AS_public); 8805 VaListTagDecl->addDecl(Field); 8806 } 8807 VaListTagDecl->completeDefinition(); 8808 Context->VaListTagDecl = VaListTagDecl; 8809 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8810 8811 // } __builtin_va_list; 8812 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8813 } 8814 8815 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8816 // typedef struct __va_list_tag { 8817 RecordDecl *VaListTagDecl; 8818 8819 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8820 VaListTagDecl->startDefinition(); 8821 8822 const size_t NumFields = 5; 8823 QualType FieldTypes[NumFields]; 8824 const char *FieldNames[NumFields]; 8825 8826 // unsigned char gpr; 8827 FieldTypes[0] = Context->UnsignedCharTy; 8828 FieldNames[0] = "gpr"; 8829 8830 // unsigned char fpr; 8831 FieldTypes[1] = Context->UnsignedCharTy; 8832 FieldNames[1] = "fpr"; 8833 8834 // unsigned short reserved; 8835 FieldTypes[2] = Context->UnsignedShortTy; 8836 FieldNames[2] = "reserved"; 8837 8838 // void* overflow_arg_area; 8839 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8840 FieldNames[3] = "overflow_arg_area"; 8841 8842 // void* reg_save_area; 8843 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8844 FieldNames[4] = "reg_save_area"; 8845 8846 // Create fields 8847 for (unsigned i = 0; i < NumFields; ++i) { 8848 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8849 SourceLocation(), 8850 SourceLocation(), 8851 &Context->Idents.get(FieldNames[i]), 8852 FieldTypes[i], /*TInfo=*/nullptr, 8853 /*BitWidth=*/nullptr, 8854 /*Mutable=*/false, 8855 ICIS_NoInit); 8856 Field->setAccess(AS_public); 8857 VaListTagDecl->addDecl(Field); 8858 } 8859 VaListTagDecl->completeDefinition(); 8860 
Context->VaListTagDecl = VaListTagDecl; 8861 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8862 8863 // } __va_list_tag; 8864 TypedefDecl *VaListTagTypedefDecl = 8865 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8866 8867 QualType VaListTagTypedefType = 8868 Context->getTypedefType(VaListTagTypedefDecl); 8869 8870 // typedef __va_list_tag __builtin_va_list[1]; 8871 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8872 QualType VaListTagArrayType = Context->getConstantArrayType( 8873 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 8874 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8875 } 8876 8877 static TypedefDecl * 8878 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8879 // struct __va_list_tag { 8880 RecordDecl *VaListTagDecl; 8881 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8882 VaListTagDecl->startDefinition(); 8883 8884 const size_t NumFields = 4; 8885 QualType FieldTypes[NumFields]; 8886 const char *FieldNames[NumFields]; 8887 8888 // unsigned gp_offset; 8889 FieldTypes[0] = Context->UnsignedIntTy; 8890 FieldNames[0] = "gp_offset"; 8891 8892 // unsigned fp_offset; 8893 FieldTypes[1] = Context->UnsignedIntTy; 8894 FieldNames[1] = "fp_offset"; 8895 8896 // void* overflow_arg_area; 8897 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8898 FieldNames[2] = "overflow_arg_area"; 8899 8900 // void* reg_save_area; 8901 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8902 FieldNames[3] = "reg_save_area"; 8903 8904 // Create fields 8905 for (unsigned i = 0; i < NumFields; ++i) { 8906 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8907 VaListTagDecl, 8908 SourceLocation(), 8909 SourceLocation(), 8910 &Context->Idents.get(FieldNames[i]), 8911 FieldTypes[i], /*TInfo=*/nullptr, 8912 /*BitWidth=*/nullptr, 8913 /*Mutable=*/false, 8914 ICIS_NoInit); 8915 Field->setAccess(AS_public); 8916 VaListTagDecl->addDecl(Field); 8917 } 8918 VaListTagDecl->completeDefinition(); 8919 Context->VaListTagDecl = VaListTagDecl; 8920 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8921 8922 // }; 8923 8924 // typedef struct __va_list_tag __builtin_va_list[1]; 8925 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8926 QualType VaListTagArrayType = Context->getConstantArrayType( 8927 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 8928 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8929 } 8930 8931 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8932 // typedef int __builtin_va_list[4]; 8933 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8934 QualType IntArrayType = Context->getConstantArrayType( 8935 Context->IntTy, Size, nullptr, ArraySizeModifier::Normal, 0); 8936 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8937 } 8938 8939 static TypedefDecl * 8940 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8941 // struct __va_list 8942 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8943 if (Context->getLangOpts().CPlusPlus) { 8944 // namespace std { struct __va_list { 8945 NamespaceDecl *NS; 8946 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8947 Context->getTranslationUnitDecl(), 8948 /*Inline=*/false, SourceLocation(), 8949 SourceLocation(), &Context->Idents.get("std"), 8950 /*PrevDecl=*/nullptr, /*Nested=*/false); 8951 
NS->setImplicit(); 8952 VaListDecl->setDeclContext(NS); 8953 } 8954 8955 VaListDecl->startDefinition(); 8956 8957 // void * __ap; 8958 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8959 VaListDecl, 8960 SourceLocation(), 8961 SourceLocation(), 8962 &Context->Idents.get("__ap"), 8963 Context->getPointerType(Context->VoidTy), 8964 /*TInfo=*/nullptr, 8965 /*BitWidth=*/nullptr, 8966 /*Mutable=*/false, 8967 ICIS_NoInit); 8968 Field->setAccess(AS_public); 8969 VaListDecl->addDecl(Field); 8970 8971 // }; 8972 VaListDecl->completeDefinition(); 8973 Context->VaListTagDecl = VaListDecl; 8974 8975 // typedef struct __va_list __builtin_va_list; 8976 QualType T = Context->getRecordType(VaListDecl); 8977 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8978 } 8979 8980 static TypedefDecl * 8981 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8982 // struct __va_list_tag { 8983 RecordDecl *VaListTagDecl; 8984 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8985 VaListTagDecl->startDefinition(); 8986 8987 const size_t NumFields = 4; 8988 QualType FieldTypes[NumFields]; 8989 const char *FieldNames[NumFields]; 8990 8991 // long __gpr; 8992 FieldTypes[0] = Context->LongTy; 8993 FieldNames[0] = "__gpr"; 8994 8995 // long __fpr; 8996 FieldTypes[1] = Context->LongTy; 8997 FieldNames[1] = "__fpr"; 8998 8999 // void *__overflow_arg_area; 9000 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9001 FieldNames[2] = "__overflow_arg_area"; 9002 9003 // void *__reg_save_area; 9004 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 9005 FieldNames[3] = "__reg_save_area"; 9006 9007 // Create fields 9008 for (unsigned i = 0; i < NumFields; ++i) { 9009 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 9010 VaListTagDecl, 9011 SourceLocation(), 9012 SourceLocation(), 9013 &Context->Idents.get(FieldNames[i]), 9014 FieldTypes[i], /*TInfo=*/nullptr, 9015 /*BitWidth=*/nullptr, 9016 /*Mutable=*/false, 9017 ICIS_NoInit); 9018 Field->setAccess(AS_public); 9019 VaListTagDecl->addDecl(Field); 9020 } 9021 VaListTagDecl->completeDefinition(); 9022 Context->VaListTagDecl = VaListTagDecl; 9023 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9024 9025 // }; 9026 9027 // typedef __va_list_tag __builtin_va_list[1]; 9028 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9029 QualType VaListTagArrayType = Context->getConstantArrayType( 9030 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 9031 9032 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9033 } 9034 9035 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 9036 // typedef struct __va_list_tag { 9037 RecordDecl *VaListTagDecl; 9038 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9039 VaListTagDecl->startDefinition(); 9040 9041 const size_t NumFields = 3; 9042 QualType FieldTypes[NumFields]; 9043 const char *FieldNames[NumFields]; 9044 9045 // void *CurrentSavedRegisterArea; 9046 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 9047 FieldNames[0] = "__current_saved_reg_area_pointer"; 9048 9049 // void *SavedRegAreaEnd; 9050 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 9051 FieldNames[1] = "__saved_reg_area_end_pointer"; 9052 9053 // void *OverflowArea; 9054 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9055 FieldNames[2] = "__overflow_area_pointer"; 9056 9057 // Create fields 9058 for (unsigned i = 0; i < NumFields; ++i) { 9059 
FieldDecl *Field = FieldDecl::Create( 9060 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 9061 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 9062 /*TInfo=*/nullptr, 9063 /*BitWidth=*/nullptr, 9064 /*Mutable=*/false, ICIS_NoInit); 9065 Field->setAccess(AS_public); 9066 VaListTagDecl->addDecl(Field); 9067 } 9068 VaListTagDecl->completeDefinition(); 9069 Context->VaListTagDecl = VaListTagDecl; 9070 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9071 9072 // } __va_list_tag; 9073 TypedefDecl *VaListTagTypedefDecl = 9074 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 9075 9076 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 9077 9078 // typedef __va_list_tag __builtin_va_list[1]; 9079 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9080 QualType VaListTagArrayType = Context->getConstantArrayType( 9081 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 9082 9083 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9084 } 9085 9086 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 9087 TargetInfo::BuiltinVaListKind Kind) { 9088 switch (Kind) { 9089 case TargetInfo::CharPtrBuiltinVaList: 9090 return CreateCharPtrBuiltinVaListDecl(Context); 9091 case TargetInfo::VoidPtrBuiltinVaList: 9092 return CreateVoidPtrBuiltinVaListDecl(Context); 9093 case TargetInfo::AArch64ABIBuiltinVaList: 9094 return CreateAArch64ABIBuiltinVaListDecl(Context); 9095 case TargetInfo::PowerABIBuiltinVaList: 9096 return CreatePowerABIBuiltinVaListDecl(Context); 9097 case TargetInfo::X86_64ABIBuiltinVaList: 9098 return CreateX86_64ABIBuiltinVaListDecl(Context); 9099 case TargetInfo::PNaClABIBuiltinVaList: 9100 return CreatePNaClABIBuiltinVaListDecl(Context); 9101 case TargetInfo::AAPCSABIBuiltinVaList: 9102 return CreateAAPCSABIBuiltinVaListDecl(Context); 9103 case TargetInfo::SystemZBuiltinVaList: 9104 return CreateSystemZBuiltinVaListDecl(Context); 9105 case TargetInfo::HexagonBuiltinVaList: 9106 return CreateHexagonBuiltinVaListDecl(Context); 9107 } 9108 9109 llvm_unreachable("Unhandled __builtin_va_list type kind"); 9110 } 9111 9112 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 9113 if (!BuiltinVaListDecl) { 9114 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 9115 assert(BuiltinVaListDecl->isImplicit()); 9116 } 9117 9118 return BuiltinVaListDecl; 9119 } 9120 9121 Decl *ASTContext::getVaListTagDecl() const { 9122 // Force the creation of VaListTagDecl by building the __builtin_va_list 9123 // declaration. 9124 if (!VaListTagDecl) 9125 (void)getBuiltinVaListDecl(); 9126 9127 return VaListTagDecl; 9128 } 9129 9130 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 9131 if (!BuiltinMSVaListDecl) 9132 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 9133 9134 return BuiltinMSVaListDecl; 9135 } 9136 9137 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 9138 // Allow redecl custom type checking builtin for HLSL. 
9139 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin && 9140 BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID())) 9141 return true; 9142 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 9143 } 9144 9145 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 9146 assert(ObjCConstantStringType.isNull() && 9147 "'NSConstantString' type already set!"); 9148 9149 ObjCConstantStringType = getObjCInterfaceType(Decl); 9150 } 9151 9152 /// Retrieve the template name that corresponds to a non-empty 9153 /// lookup. 9154 TemplateName 9155 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 9156 UnresolvedSetIterator End) const { 9157 unsigned size = End - Begin; 9158 assert(size > 1 && "set is not overloaded!"); 9159 9160 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 9161 size * sizeof(FunctionTemplateDecl*)); 9162 auto *OT = new (memory) OverloadedTemplateStorage(size); 9163 9164 NamedDecl **Storage = OT->getStorage(); 9165 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 9166 NamedDecl *D = *I; 9167 assert(isa<FunctionTemplateDecl>(D) || 9168 isa<UnresolvedUsingValueDecl>(D) || 9169 (isa<UsingShadowDecl>(D) && 9170 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 9171 *Storage++ = D; 9172 } 9173 9174 return TemplateName(OT); 9175 } 9176 9177 /// Retrieve a template name representing an unqualified-id that has been 9178 /// assumed to name a template for ADL purposes. 9179 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 9180 auto *OT = new (*this) AssumedTemplateStorage(Name); 9181 return TemplateName(OT); 9182 } 9183 9184 /// Retrieve the template name that represents a qualified 9185 /// template name such as \c std::vector. 9186 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 9187 bool TemplateKeyword, 9188 TemplateName Template) const { 9189 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9190 9191 // FIXME: Canonicalization? 9192 llvm::FoldingSetNodeID ID; 9193 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9194 9195 void *InsertPos = nullptr; 9196 QualifiedTemplateName *QTN = 9197 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9198 if (!QTN) { 9199 QTN = new (*this, alignof(QualifiedTemplateName)) 9200 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9201 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9202 } 9203 9204 return TemplateName(QTN); 9205 } 9206 9207 /// Retrieve the template name that represents a dependent 9208 /// template name such as \c MetaFun::template apply. 
9209 TemplateName 9210 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9211 const IdentifierInfo *Name) const { 9212 assert((!NNS || NNS->isDependent()) && 9213 "Nested name specifier must be dependent"); 9214 9215 llvm::FoldingSetNodeID ID; 9216 DependentTemplateName::Profile(ID, NNS, Name); 9217 9218 void *InsertPos = nullptr; 9219 DependentTemplateName *QTN = 9220 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9221 9222 if (QTN) 9223 return TemplateName(QTN); 9224 9225 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9226 if (CanonNNS == NNS) { 9227 QTN = new (*this, alignof(DependentTemplateName)) 9228 DependentTemplateName(NNS, Name); 9229 } else { 9230 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9231 QTN = new (*this, alignof(DependentTemplateName)) 9232 DependentTemplateName(NNS, Name, Canon); 9233 DependentTemplateName *CheckQTN = 9234 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9235 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9236 (void)CheckQTN; 9237 } 9238 9239 DependentTemplateNames.InsertNode(QTN, InsertPos); 9240 return TemplateName(QTN); 9241 } 9242 9243 /// Retrieve the template name that represents a dependent 9244 /// template name such as \c MetaFun::template operator+. 9245 TemplateName 9246 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9247 OverloadedOperatorKind Operator) const { 9248 assert((!NNS || NNS->isDependent()) && 9249 "Nested name specifier must be dependent"); 9250 9251 llvm::FoldingSetNodeID ID; 9252 DependentTemplateName::Profile(ID, NNS, Operator); 9253 9254 void *InsertPos = nullptr; 9255 DependentTemplateName *QTN 9256 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9257 9258 if (QTN) 9259 return TemplateName(QTN); 9260 9261 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9262 if (CanonNNS == NNS) { 9263 QTN = new (*this, alignof(DependentTemplateName)) 9264 DependentTemplateName(NNS, Operator); 9265 } else { 9266 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9267 QTN = new (*this, alignof(DependentTemplateName)) 9268 DependentTemplateName(NNS, Operator, Canon); 9269 9270 DependentTemplateName *CheckQTN 9271 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9272 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9273 (void)CheckQTN; 9274 } 9275 9276 DependentTemplateNames.InsertNode(QTN, InsertPos); 9277 return TemplateName(QTN); 9278 } 9279 9280 TemplateName ASTContext::getSubstTemplateTemplateParm( 9281 TemplateName Replacement, Decl *AssociatedDecl, unsigned Index, 9282 std::optional<unsigned> PackIndex) const { 9283 llvm::FoldingSetNodeID ID; 9284 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl, 9285 Index, PackIndex); 9286 9287 void *insertPos = nullptr; 9288 SubstTemplateTemplateParmStorage *subst 9289 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9290 9291 if (!subst) { 9292 subst = new (*this) SubstTemplateTemplateParmStorage( 9293 Replacement, AssociatedDecl, Index, PackIndex); 9294 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9295 } 9296 9297 return TemplateName(subst); 9298 } 9299 9300 TemplateName 9301 ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack, 9302 Decl *AssociatedDecl, 9303 unsigned Index, bool Final) const { 9304 auto &Self = const_cast<ASTContext &>(*this); 9305 llvm::FoldingSetNodeID ID; 9306 
SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack, 9307 AssociatedDecl, Index, Final); 9308 9309 void *InsertPos = nullptr; 9310 SubstTemplateTemplateParmPackStorage *Subst 9311 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 9312 9313 if (!Subst) { 9314 Subst = new (*this) SubstTemplateTemplateParmPackStorage( 9315 ArgPack.pack_elements(), AssociatedDecl, Index, Final); 9316 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 9317 } 9318 9319 return TemplateName(Subst); 9320 } 9321
9322 /// getFromTargetType - Given one of the integer types provided by 9323 /// TargetInfo, produce the corresponding type. The unsigned @p Type 9324 /// is actually a value of type @c TargetInfo::IntType. 9325 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 9326 switch (Type) { 9327 case TargetInfo::NoInt: return {}; 9328 case TargetInfo::SignedChar: return SignedCharTy; 9329 case TargetInfo::UnsignedChar: return UnsignedCharTy; 9330 case TargetInfo::SignedShort: return ShortTy; 9331 case TargetInfo::UnsignedShort: return UnsignedShortTy; 9332 case TargetInfo::SignedInt: return IntTy; 9333 case TargetInfo::UnsignedInt: return UnsignedIntTy; 9334 case TargetInfo::SignedLong: return LongTy; 9335 case TargetInfo::UnsignedLong: return UnsignedLongTy; 9336 case TargetInfo::SignedLongLong: return LongLongTy; 9337 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 9338 } 9339 9340 llvm_unreachable("Unhandled TargetInfo::IntType value"); 9341 } 9342
9343 //===----------------------------------------------------------------------===// 9344 // Type Predicates. 9345 //===----------------------------------------------------------------------===// 9346
9347 /// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, reflecting the 9348 /// Objective-C garbage collection attribute of the given type. 9349 /// 9350 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 9351 if (getLangOpts().getGC() == LangOptions::NonGC) 9352 return Qualifiers::GCNone; 9353 9354 assert(getLangOpts().ObjC); 9355 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 9356
9357 // The default behaviour under Objective-C's GC is for ObjC pointers 9358 // (or pointers to them) to be treated as though they were declared 9359 // as __strong. 9360 if (GCAttrs == Qualifiers::GCNone) { 9361 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 9362 return Qualifiers::Strong; 9363 else if (Ty->isPointerType()) 9364 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 9365 } else { 9366 // It's not valid to set GC attributes on anything that isn't a 9367 // pointer. 9368 #ifndef NDEBUG 9369 QualType CT = Ty->getCanonicalTypeInternal(); 9370 while (const auto *AT = dyn_cast<ArrayType>(CT)) 9371 CT = AT->getElementType(); 9372 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 9373 #endif 9374 } 9375 return GCAttrs; 9376 } 9377
9378 //===----------------------------------------------------------------------===// 9379 // Type Compatibility Testing 9380 //===----------------------------------------------------------------------===// 9381
9382 /// areCompatVectorTypes - Return true if the two specified vector types are 9383 /// compatible.
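///
/// A minimal illustration (the typedef names are hypothetical): both operands
/// below have the same canonical element type and element count, so this
/// predicate treats them as compatible.
/// \code
///   typedef int v4si_a __attribute__((vector_size(16)));
///   typedef int v4si_b __attribute__((vector_size(16)));
/// \endcode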
9384 static bool areCompatVectorTypes(const VectorType *LHS, 9385 const VectorType *RHS) { 9386 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9387 return LHS->getElementType() == RHS->getElementType() && 9388 LHS->getNumElements() == RHS->getNumElements(); 9389 } 9390 9391 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9392 /// compatible. 9393 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9394 const ConstantMatrixType *RHS) { 9395 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9396 return LHS->getElementType() == RHS->getElementType() && 9397 LHS->getNumRows() == RHS->getNumRows() && 9398 LHS->getNumColumns() == RHS->getNumColumns(); 9399 } 9400 9401 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9402 QualType SecondVec) { 9403 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9404 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9405 9406 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9407 return true; 9408 9409 // Treat Neon vector types and most AltiVec vector types as if they are the 9410 // equivalent GCC vector types. 9411 const auto *First = FirstVec->castAs<VectorType>(); 9412 const auto *Second = SecondVec->castAs<VectorType>(); 9413 if (First->getNumElements() == Second->getNumElements() && 9414 hasSameType(First->getElementType(), Second->getElementType()) && 9415 First->getVectorKind() != VectorKind::AltiVecPixel && 9416 First->getVectorKind() != VectorKind::AltiVecBool && 9417 Second->getVectorKind() != VectorKind::AltiVecPixel && 9418 Second->getVectorKind() != VectorKind::AltiVecBool && 9419 First->getVectorKind() != VectorKind::SveFixedLengthData && 9420 First->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9421 Second->getVectorKind() != VectorKind::SveFixedLengthData && 9422 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9423 First->getVectorKind() != VectorKind::RVVFixedLengthData && 9424 Second->getVectorKind() != VectorKind::RVVFixedLengthData && 9425 First->getVectorKind() != VectorKind::RVVFixedLengthMask && 9426 Second->getVectorKind() != VectorKind::RVVFixedLengthMask) 9427 return true; 9428 9429 return false; 9430 } 9431 9432 /// getSVETypeSize - Return SVE vector or predicate register size. 9433 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9434 assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type"); 9435 if (Ty->getKind() == BuiltinType::SveBool || 9436 Ty->getKind() == BuiltinType::SveCount) 9437 return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth(); 9438 return Context.getLangOpts().VScaleMin * 128; 9439 } 9440 9441 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9442 QualType SecondType) { 9443 assert( 9444 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9445 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9446 "Expected SVE builtin type and vector type!"); 9447 9448 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9449 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9450 if (const auto *VT = SecondType->getAs<VectorType>()) { 9451 // Predicates have the same representation as uint8 so we also have to 9452 // check the kind to make these types incompatible. 
9453 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 9454 return BT->getKind() == BuiltinType::SveBool; 9455 else if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 9456 return VT->getElementType().getCanonicalType() == 9457 FirstType->getSveEltType(*this); 9458 else if (VT->getVectorKind() == VectorKind::Generic) 9459 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9460 hasSameType(VT->getElementType(), 9461 getBuiltinVectorTypeInfo(BT).ElementType); 9462 } 9463 } 9464 return false; 9465 }; 9466 9467 return IsValidCast(FirstType, SecondType) || 9468 IsValidCast(SecondType, FirstType); 9469 } 9470 9471 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9472 QualType SecondType) { 9473 assert( 9474 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9475 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9476 "Expected SVE builtin type and vector type!"); 9477 9478 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9479 const auto *BT = FirstType->getAs<BuiltinType>(); 9480 if (!BT) 9481 return false; 9482 9483 const auto *VecTy = SecondType->getAs<VectorType>(); 9484 if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData || 9485 VecTy->getVectorKind() == VectorKind::Generic)) { 9486 const LangOptions::LaxVectorConversionKind LVCKind = 9487 getLangOpts().getLaxVectorConversions(); 9488 9489 // Can not convert between sve predicates and sve vectors because of 9490 // different size. 9491 if (BT->getKind() == BuiltinType::SveBool && 9492 VecTy->getVectorKind() == VectorKind::SveFixedLengthData) 9493 return false; 9494 9495 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9496 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9497 // converts to VLAT and VLAT implicitly converts to GNUT." 9498 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9499 // predicates. 9500 if (VecTy->getVectorKind() == VectorKind::Generic && 9501 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9502 return false; 9503 9504 // If -flax-vector-conversions=all is specified, the types are 9505 // certainly compatible. 9506 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9507 return true; 9508 9509 // If -flax-vector-conversions=integer is specified, the types are 9510 // compatible if the elements are integer types. 9511 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9512 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9513 FirstType->getSveEltType(*this)->isIntegerType(); 9514 } 9515 9516 return false; 9517 }; 9518 9519 return IsLaxCompatible(FirstType, SecondType) || 9520 IsLaxCompatible(SecondType, FirstType); 9521 } 9522 9523 /// getRVVTypeSize - Return RVV vector register size. 
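///
/// Illustrative arithmetic only (the numbers are hypothetical): with a
/// minimum vscale of 2 and a builtin type whose minimal form is 4 elements of
/// 32 bits, the computed size is 2 * 4 * 32 = 256 bits; boolean elements are
/// counted as 1 bit each.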
9524 static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) { 9525 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type"); 9526 auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts()); 9527 if (!VScale) 9528 return 0; 9529 9530 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty); 9531 9532 unsigned EltSize = Context.getTypeSize(Info.ElementType); 9533 if (Info.ElementType == Context.BoolTy) 9534 EltSize = 1; 9535 9536 unsigned MinElts = Info.EC.getKnownMinValue(); 9537 return VScale->first * MinElts * EltSize; 9538 } 9539 9540 bool ASTContext::areCompatibleRVVTypes(QualType FirstType, 9541 QualType SecondType) { 9542 assert( 9543 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9544 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9545 "Expected RVV builtin type and vector type!"); 9546 9547 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9548 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9549 if (const auto *VT = SecondType->getAs<VectorType>()) { 9550 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) { 9551 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(BT); 9552 return FirstType->isRVVVLSBuiltinType() && 9553 Info.ElementType == BoolTy && 9554 getTypeSize(SecondType) == getRVVTypeSize(*this, BT); 9555 } 9556 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData || 9557 VT->getVectorKind() == VectorKind::Generic) 9558 return FirstType->isRVVVLSBuiltinType() && 9559 getTypeSize(SecondType) == getRVVTypeSize(*this, BT) && 9560 hasSameType(VT->getElementType(), 9561 getBuiltinVectorTypeInfo(BT).ElementType); 9562 } 9563 } 9564 return false; 9565 }; 9566 9567 return IsValidCast(FirstType, SecondType) || 9568 IsValidCast(SecondType, FirstType); 9569 } 9570 9571 bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType, 9572 QualType SecondType) { 9573 assert( 9574 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9575 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9576 "Expected RVV builtin type and vector type!"); 9577 9578 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9579 const auto *BT = FirstType->getAs<BuiltinType>(); 9580 if (!BT) 9581 return false; 9582 9583 if (!BT->isRVVVLSBuiltinType()) 9584 return false; 9585 9586 const auto *VecTy = SecondType->getAs<VectorType>(); 9587 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) { 9588 const LangOptions::LaxVectorConversionKind LVCKind = 9589 getLangOpts().getLaxVectorConversions(); 9590 9591 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion. 9592 if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT)) 9593 return false; 9594 9595 // If -flax-vector-conversions=all is specified, the types are 9596 // certainly compatible. 9597 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9598 return true; 9599 9600 // If -flax-vector-conversions=integer is specified, the types are 9601 // compatible if the elements are integer types. 
9602 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9603 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9604 FirstType->getRVVEltType(*this)->isIntegerType(); 9605 } 9606 9607 return false; 9608 }; 9609 9610 return IsLaxCompatible(FirstType, SecondType) || 9611 IsLaxCompatible(SecondType, FirstType); 9612 } 9613 9614 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9615 while (true) { 9616 // __strong id 9617 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9618 if (Attr->getAttrKind() == attr::ObjCOwnership) 9619 return true; 9620 9621 Ty = Attr->getModifiedType(); 9622 9623 // X *__strong (...) 9624 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9625 Ty = Paren->getInnerType(); 9626 9627 // We do not want to look through typedefs, typeof(expr), 9628 // typeof(type), or any other way that the type is somehow 9629 // abstracted. 9630 } else { 9631 return false; 9632 } 9633 } 9634 } 9635 9636 //===----------------------------------------------------------------------===// 9637 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9638 //===----------------------------------------------------------------------===// 9639 9640 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9641 /// inheritance hierarchy of 'rProto'. 9642 bool 9643 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9644 ObjCProtocolDecl *rProto) const { 9645 if (declaresSameEntity(lProto, rProto)) 9646 return true; 9647 for (auto *PI : rProto->protocols()) 9648 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9649 return true; 9650 return false; 9651 } 9652 9653 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9654 /// Class<pr1, ...>. 9655 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9656 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9657 for (auto *lhsProto : lhs->quals()) { 9658 bool match = false; 9659 for (auto *rhsProto : rhs->quals()) { 9660 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9661 match = true; 9662 break; 9663 } 9664 } 9665 if (!match) 9666 return false; 9667 } 9668 return true; 9669 } 9670 9671 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9672 /// ObjCQualifiedIDType. 9673 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9674 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9675 bool compare) { 9676 // Allow id<P..> and an 'id' in all cases. 9677 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9678 return true; 9679 9680 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9681 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9682 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9683 return false; 9684 9685 if (lhs->isObjCQualifiedIdType()) { 9686 if (rhs->qual_empty()) { 9687 // If the RHS is a unqualified interface pointer "NSString*", 9688 // make sure we check the class hierarchy. 9689 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9690 for (auto *I : lhs->quals()) { 9691 // when comparing an id<P> on lhs with a static type on rhs, 9692 // see if static class implements all of id's protocols, directly or 9693 // through its super class and categories. 9694 if (!rhsID->ClassImplementsProtocol(I, true)) 9695 return false; 9696 } 9697 } 9698 // If there are no qualifiers and no interface, we have an 'id'. 
9699 return true; 9700 } 9701 // Both the right and left sides have qualifiers. 9702 for (auto *lhsProto : lhs->quals()) { 9703 bool match = false; 9704 9705 // when comparing an id<P> on lhs with a static type on rhs, 9706 // see if static class implements all of id's protocols, directly or 9707 // through its super class and categories. 9708 for (auto *rhsProto : rhs->quals()) { 9709 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9710 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9711 match = true; 9712 break; 9713 } 9714 } 9715 // If the RHS is a qualified interface pointer "NSString<P>*", 9716 // make sure we check the class hierarchy. 9717 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9718 for (auto *I : lhs->quals()) { 9719 // when comparing an id<P> on lhs with a static type on rhs, 9720 // see if static class implements all of id's protocols, directly or 9721 // through its super class and categories. 9722 if (rhsID->ClassImplementsProtocol(I, true)) { 9723 match = true; 9724 break; 9725 } 9726 } 9727 } 9728 if (!match) 9729 return false; 9730 } 9731 9732 return true; 9733 } 9734 9735 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9736 9737 if (lhs->getInterfaceType()) { 9738 // If both the right and left sides have qualifiers. 9739 for (auto *lhsProto : lhs->quals()) { 9740 bool match = false; 9741 9742 // when comparing an id<P> on rhs with a static type on lhs, 9743 // see if static class implements all of id's protocols, directly or 9744 // through its super class and categories. 9745 // First, lhs protocols in the qualifier list must be found, direct 9746 // or indirect in rhs's qualifier list or it is a mismatch. 9747 for (auto *rhsProto : rhs->quals()) { 9748 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9749 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9750 match = true; 9751 break; 9752 } 9753 } 9754 if (!match) 9755 return false; 9756 } 9757 9758 // Static class's protocols, or its super class or category protocols 9759 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9760 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9761 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9762 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9763 // This is rather dubious but matches gcc's behavior. If lhs has 9764 // no type qualifier and its class has no static protocol(s) 9765 // assume that it is mismatch. 9766 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9767 return false; 9768 for (auto *lhsProto : LHSInheritedProtocols) { 9769 bool match = false; 9770 for (auto *rhsProto : rhs->quals()) { 9771 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9772 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9773 match = true; 9774 break; 9775 } 9776 } 9777 if (!match) 9778 return false; 9779 } 9780 } 9781 return true; 9782 } 9783 return false; 9784 } 9785 9786 /// canAssignObjCInterfaces - Return true if the two interface types are 9787 /// compatible for assignment from RHS to LHS. This handles validation of any 9788 /// protocol qualifiers on the LHS or RHS. 9789 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9790 const ObjCObjectPointerType *RHSOPT) { 9791 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9792 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9793 9794 // If either type represents the built-in 'id' type, return true. 
9795 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9796 return true; 9797 9798 // Function object that propagates a successful result or handles 9799 // __kindof types. 9800 auto finish = [&](bool succeeded) -> bool { 9801 if (succeeded) 9802 return true; 9803 9804 if (!RHS->isKindOfType()) 9805 return false; 9806 9807 // Strip off __kindof and protocol qualifiers, then check whether 9808 // we can assign the other way. 9809 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9810 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9811 }; 9812 9813 // Casts from or to id<P> are allowed when the other side has compatible 9814 // protocols. 9815 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9816 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9817 } 9818 9819 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 9820 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9821 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9822 } 9823 9824 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9825 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9826 return true; 9827 } 9828 9829 // If we have 2 user-defined types, fall into that path. 9830 if (LHS->getInterface() && RHS->getInterface()) { 9831 return finish(canAssignObjCInterfaces(LHS, RHS)); 9832 } 9833 9834 return false; 9835 } 9836 9837 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9838 /// for providing type-safety for objective-c pointers used to pass/return 9839 /// arguments in block literals. When passed as arguments, passing 'A*' where 9840 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9841 /// not OK. For the return type, the opposite is not OK. 9842 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9843 const ObjCObjectPointerType *LHSOPT, 9844 const ObjCObjectPointerType *RHSOPT, 9845 bool BlockReturnType) { 9846 9847 // Function object that propagates a successful result or handles 9848 // __kindof types. 9849 auto finish = [&](bool succeeded) -> bool { 9850 if (succeeded) 9851 return true; 9852 9853 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9854 if (!Expected->isKindOfType()) 9855 return false; 9856 9857 // Strip off __kindof and protocol qualifiers, then check whether 9858 // we can assign the other way. 9859 return canAssignObjCInterfacesInBlockPointer( 9860 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9861 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9862 BlockReturnType); 9863 }; 9864 9865 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9866 return true; 9867 9868 if (LHSOPT->isObjCBuiltinType()) { 9869 return finish(RHSOPT->isObjCBuiltinType() || 9870 RHSOPT->isObjCQualifiedIdType()); 9871 } 9872 9873 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9874 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9875 // Use for block parameters previous type checking for compatibility. 9876 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9877 // Or corrected type checking as in non-compat mode. 9878 (!BlockReturnType && 9879 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9880 else 9881 return finish(ObjCQualifiedIdTypesAreCompatible( 9882 (BlockReturnType ? LHSOPT : RHSOPT), 9883 (BlockReturnType ? 
RHSOPT : LHSOPT), false)); 9884 } 9885 9886 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9887 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9888 if (LHS && RHS) { // We have 2 user-defined types. 9889 if (LHS != RHS) { 9890 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9891 return finish(BlockReturnType); 9892 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9893 return finish(!BlockReturnType); 9894 } 9895 else 9896 return true; 9897 } 9898 return false; 9899 } 9900 9901 /// Comparison routine for Objective-C protocols to be used with 9902 /// llvm::array_pod_sort. 9903 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9904 ObjCProtocolDecl * const *rhs) { 9905 return (*lhs)->getName().compare((*rhs)->getName()); 9906 } 9907 9908 /// getIntersectionOfProtocols - This routine finds the intersection of set 9909 /// of protocols inherited from two distinct objective-c pointer objects with 9910 /// the given common base. 9911 /// It is used to build composite qualifier list of the composite type of 9912 /// the conditional expression involving two objective-c pointer objects. 9913 static 9914 void getIntersectionOfProtocols(ASTContext &Context, 9915 const ObjCInterfaceDecl *CommonBase, 9916 const ObjCObjectPointerType *LHSOPT, 9917 const ObjCObjectPointerType *RHSOPT, 9918 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9919 9920 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9921 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9922 assert(LHS->getInterface() && "LHS must have an interface base"); 9923 assert(RHS->getInterface() && "RHS must have an interface base"); 9924 9925 // Add all of the protocols for the LHS. 9926 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9927 9928 // Start with the protocol qualifiers. 9929 for (auto *proto : LHS->quals()) { 9930 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9931 } 9932 9933 // Also add the protocols associated with the LHS interface. 9934 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9935 9936 // Add all of the protocols for the RHS. 9937 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9938 9939 // Start with the protocol qualifiers. 9940 for (auto *proto : RHS->quals()) { 9941 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9942 } 9943 9944 // Also add the protocols associated with the RHS interface. 9945 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9946 9947 // Compute the intersection of the collected protocol sets. 9948 for (auto *proto : LHSProtocolSet) { 9949 if (RHSProtocolSet.count(proto)) 9950 IntersectionSet.push_back(proto); 9951 } 9952 9953 // Compute the set of protocols that is implied by either the common type or 9954 // the protocols within the intersection. 9955 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9956 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9957 9958 // Remove any implied protocols from the list of inherited protocols. 9959 if (!ImpliedProtocols.empty()) { 9960 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9961 return ImpliedProtocols.contains(proto); 9962 }); 9963 } 9964 9965 // Sort the remaining protocols by name. 9966 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9967 compareObjCProtocolsByName); 9968 } 9969 9970 /// Determine whether the first type is a subtype of the second. 
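///
/// One concrete case accepted below (illustrative; the block type is an
/// assumed example): pairing an unqualified 'id' with a block pointer such as
/// 'void (^)(void)', in either position, is treated as assignable.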
9971 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9972 QualType rhs) { 9973 // Common case: two object pointers. 9974 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9975 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9976 if (lhsOPT && rhsOPT) 9977 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9978 9979 // Two block pointers. 9980 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9981 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9982 if (lhsBlock && rhsBlock) 9983 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9984 9985 // If either is an unqualified 'id' and the other is a block, it's 9986 // acceptable. 9987 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9988 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9989 return true; 9990 9991 return false; 9992 } 9993 9994 // Check that the given Objective-C type argument lists are equivalent. 9995 static bool sameObjCTypeArgs(ASTContext &ctx, 9996 const ObjCInterfaceDecl *iface, 9997 ArrayRef<QualType> lhsArgs, 9998 ArrayRef<QualType> rhsArgs, 9999 bool stripKindOf) { 10000 if (lhsArgs.size() != rhsArgs.size()) 10001 return false; 10002 10003 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 10004 if (!typeParams) 10005 return false; 10006 10007 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 10008 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 10009 continue; 10010 10011 switch (typeParams->begin()[i]->getVariance()) { 10012 case ObjCTypeParamVariance::Invariant: 10013 if (!stripKindOf || 10014 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 10015 rhsArgs[i].stripObjCKindOfType(ctx))) { 10016 return false; 10017 } 10018 break; 10019 10020 case ObjCTypeParamVariance::Covariant: 10021 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 10022 return false; 10023 break; 10024 10025 case ObjCTypeParamVariance::Contravariant: 10026 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 10027 return false; 10028 break; 10029 } 10030 } 10031 10032 return true; 10033 } 10034 10035 QualType ASTContext::areCommonBaseCompatible( 10036 const ObjCObjectPointerType *Lptr, 10037 const ObjCObjectPointerType *Rptr) { 10038 const ObjCObjectType *LHS = Lptr->getObjectType(); 10039 const ObjCObjectType *RHS = Rptr->getObjectType(); 10040 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 10041 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 10042 10043 if (!LDecl || !RDecl) 10044 return {}; 10045 10046 // When either LHS or RHS is a kindof type, we should return a kindof type. 10047 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 10048 // kindof(A). 10049 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 10050 10051 // Follow the left-hand side up the class hierarchy until we either hit a 10052 // root or find the RHS. Record the ancestors in case we don't find it. 10053 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 10054 LHSAncestors; 10055 while (true) { 10056 // Record this ancestor. We'll need this if the common type isn't in the 10057 // path from the LHS to the root. 10058 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 10059 10060 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 10061 // Get the type arguments. 10062 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 10063 bool anyChanges = false; 10064 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10065 // Both have type arguments, compare them. 
10066 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10067 LHS->getTypeArgs(), RHS->getTypeArgs(), 10068 /*stripKindOf=*/true)) 10069 return {}; 10070 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10071 // If only one has type arguments, the result will not have type 10072 // arguments. 10073 LHSTypeArgs = {}; 10074 anyChanges = true; 10075 } 10076 10077 // Compute the intersection of protocols. 10078 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10079 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 10080 Protocols); 10081 if (!Protocols.empty()) 10082 anyChanges = true; 10083 10084 // If anything in the LHS will have changed, build a new result type. 10085 // If we need to return a kindof type but LHS is not a kindof type, we 10086 // build a new result type. 10087 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 10088 QualType Result = getObjCInterfaceType(LHS->getInterface()); 10089 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 10090 anyKindOf || LHS->isKindOfType()); 10091 return getObjCObjectPointerType(Result); 10092 } 10093 10094 return getObjCObjectPointerType(QualType(LHS, 0)); 10095 } 10096 10097 // Find the superclass. 10098 QualType LHSSuperType = LHS->getSuperClassType(); 10099 if (LHSSuperType.isNull()) 10100 break; 10101 10102 LHS = LHSSuperType->castAs<ObjCObjectType>(); 10103 } 10104 10105 // We didn't find anything by following the LHS to its root; now check 10106 // the RHS against the cached set of ancestors. 10107 while (true) { 10108 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 10109 if (KnownLHS != LHSAncestors.end()) { 10110 LHS = KnownLHS->second; 10111 10112 // Get the type arguments. 10113 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 10114 bool anyChanges = false; 10115 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10116 // Both have type arguments, compare them. 10117 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10118 LHS->getTypeArgs(), RHS->getTypeArgs(), 10119 /*stripKindOf=*/true)) 10120 return {}; 10121 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10122 // If only one has type arguments, the result will not have type 10123 // arguments. 10124 RHSTypeArgs = {}; 10125 anyChanges = true; 10126 } 10127 10128 // Compute the intersection of protocols. 10129 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10130 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 10131 Protocols); 10132 if (!Protocols.empty()) 10133 anyChanges = true; 10134 10135 // If we need to return a kindof type but RHS is not a kindof type, we 10136 // build a new result type. 10137 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 10138 QualType Result = getObjCInterfaceType(RHS->getInterface()); 10139 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 10140 anyKindOf || RHS->isKindOfType()); 10141 return getObjCObjectPointerType(Result); 10142 } 10143 10144 return getObjCObjectPointerType(QualType(RHS, 0)); 10145 } 10146 10147 // Find the superclass of the RHS. 
10148 QualType RHSSuperType = RHS->getSuperClassType(); 10149 if (RHSSuperType.isNull()) 10150 break; 10151 10152 RHS = RHSSuperType->castAs<ObjCObjectType>(); 10153 } 10154 10155 return {}; 10156 } 10157
10158 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 10159 const ObjCObjectType *RHS) { 10160 assert(LHS->getInterface() && "LHS is not an interface type"); 10161 assert(RHS->getInterface() && "RHS is not an interface type"); 10162
10163 // Verify that the base decls are compatible: the RHS must be a subclass of 10164 // the LHS. 10165 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 10166 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 10167 if (!IsSuperClass) 10168 return false; 10169
10170 // If the LHS has protocol qualifiers, determine whether all of them are 10171 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 10172 // LHS). 10173 if (LHS->getNumProtocols() > 0) { 10174 // It is OK if conversion of LHS to SuperClass results in narrowing of types; 10175 // i.e., SuperClass may implement at least one of the protocols 10176 // in LHS's protocol list. For example, SuperObj<P1> = lhs<P1,P2> is OK, 10177 // but not SuperObj<P1,P2,P3> = lhs<P1,P2>. 10178 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols; 10179 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); 10180 // Also, if RHS has explicit qualifiers, include them for comparison with LHS's 10181 // qualifiers. 10182 for (auto *RHSPI : RHS->quals()) 10183 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols); 10184 // If there are no protocols associated with the RHS, it is not a match. 10185 if (SuperClassInheritedProtocols.empty()) 10186 return false; 10187
10188 for (const auto *LHSProto : LHS->quals()) { 10189 bool SuperImplementsProtocol = false; 10190 for (auto *SuperClassProto : SuperClassInheritedProtocols) 10191 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { 10192 SuperImplementsProtocol = true; 10193 break; 10194 } 10195 if (!SuperImplementsProtocol) 10196 return false; 10197 } 10198 } 10199
10200 // If the LHS is specialized, we may need to check type arguments. 10201 if (LHS->isSpecialized()) { 10202 // Follow the superclass chain until we've matched the LHS class in the 10203 // hierarchy. This substitutes type arguments through. 10204 const ObjCObjectType *RHSSuper = RHS; 10205 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface)) 10206 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>(); 10207
10208 // If the RHS is specialized, compare type arguments.
10209 if (RHSSuper->isSpecialized() && 10210 !sameObjCTypeArgs(*this, LHS->getInterface(), 10211 LHS->getTypeArgs(), RHSSuper->getTypeArgs(), 10212 /*stripKindOf=*/true)) { 10213 return false; 10214 } 10215 } 10216 10217 return true; 10218 } 10219 10220 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { 10221 // get the "pointed to" types 10222 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); 10223 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); 10224 10225 if (!LHSOPT || !RHSOPT) 10226 return false; 10227 10228 return canAssignObjCInterfaces(LHSOPT, RHSOPT) || 10229 canAssignObjCInterfaces(RHSOPT, LHSOPT); 10230 } 10231 10232 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { 10233 return canAssignObjCInterfaces( 10234 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(), 10235 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>()); 10236 } 10237 10238 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, 10239 /// both shall have the identically qualified version of a compatible type. 10240 /// C99 6.2.7p1: Two types have compatible types if their types are the 10241 /// same. See 6.7.[2,3,5] for additional rules. 10242 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 10243 bool CompareUnqualified) { 10244 if (getLangOpts().CPlusPlus) 10245 return hasSameType(LHS, RHS); 10246 10247 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 10248 } 10249 10250 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 10251 return typesAreCompatible(LHS, RHS); 10252 } 10253 10254 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 10255 return !mergeTypes(LHS, RHS, true).isNull(); 10256 } 10257 10258 /// mergeTransparentUnionType - if T is a transparent union type and a member 10259 /// of T is compatible with SubType, return the merged type, else return 10260 /// QualType() 10261 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 10262 bool OfBlockPointer, 10263 bool Unqualified) { 10264 if (const RecordType *UT = T->getAsUnionType()) { 10265 RecordDecl *UD = UT->getDecl(); 10266 if (UD->hasAttr<TransparentUnionAttr>()) { 10267 for (const auto *I : UD->fields()) { 10268 QualType ET = I->getType().getUnqualifiedType(); 10269 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 10270 if (!MT.isNull()) 10271 return MT; 10272 } 10273 } 10274 } 10275 10276 return {}; 10277 } 10278 10279 /// mergeFunctionParameterTypes - merge two types which appear as function 10280 /// parameter types 10281 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 10282 bool OfBlockPointer, 10283 bool Unqualified) { 10284 // GNU extension: two types are compatible if they appear as a function 10285 // argument, one of the types is a transparent union type and the other 10286 // type is compatible with a union member 10287 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 10288 Unqualified); 10289 if (!lmerge.isNull()) 10290 return lmerge; 10291 10292 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10293 Unqualified); 10294 if (!rmerge.isNull()) 10295 return rmerge; 10296 10297 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10298 } 10299 10300 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10301 bool OfBlockPointer, bool Unqualified, 10302 bool AllowCXX, 10303 bool IsConditionalOperator) { 10304 const auto 
*lbase = lhs->castAs<FunctionType>(); 10305 const auto *rbase = rhs->castAs<FunctionType>(); 10306 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10307 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10308 bool allLTypes = true; 10309 bool allRTypes = true; 10310 10311 // Check return type 10312 QualType retType; 10313 if (OfBlockPointer) { 10314 QualType RHS = rbase->getReturnType(); 10315 QualType LHS = lbase->getReturnType(); 10316 bool UnqualifiedResult = Unqualified; 10317 if (!UnqualifiedResult) 10318 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10319 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10320 } 10321 else 10322 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10323 Unqualified); 10324 if (retType.isNull()) 10325 return {}; 10326 10327 if (Unqualified) 10328 retType = retType.getUnqualifiedType(); 10329 10330 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10331 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10332 if (Unqualified) { 10333 LRetType = LRetType.getUnqualifiedType(); 10334 RRetType = RRetType.getUnqualifiedType(); 10335 } 10336 10337 if (getCanonicalType(retType) != LRetType) 10338 allLTypes = false; 10339 if (getCanonicalType(retType) != RRetType) 10340 allRTypes = false; 10341 10342 // FIXME: double check this 10343 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10344 // rbase->getRegParmAttr() != 0 && 10345 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10346 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10347 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10348 10349 // Compatible functions must have compatible calling conventions 10350 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10351 return {}; 10352 10353 // Regparm is part of the calling convention. 10354 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10355 return {}; 10356 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10357 return {}; 10358 10359 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10360 return {}; 10361 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10362 return {}; 10363 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10364 return {}; 10365 10366 // When merging declarations, it's common for supplemental information like 10367 // attributes to only be present in one of the declarations, and we generally 10368 // want type merging to preserve the union of information. So a merged 10369 // function type should be noreturn if it was noreturn in *either* operand 10370 // type. 10371 // 10372 // But for the conditional operator, this is backwards. The result of the 10373 // operator could be either operand, and its type should conservatively 10374 // reflect that. So a function type in a composite type is noreturn only 10375 // if it's noreturn in *both* operand types. 10376 // 10377 // Arguably, noreturn is a kind of subtype, and the conditional operator 10378 // ought to produce the most specific common supertype of its operand types. 10379 // That would differ from this rule in contravariant positions. However, 10380 // neither C nor C++ generally uses this kind of subtype reasoning. Also, 10381 // as a practical matter, it would only affect C code that does abstraction of 10382 // higher-order functions (taking noreturn callbacks!), which is uncommon to 10383 // say the least. So we use the simpler rule. 10384 bool NoReturn = IsConditionalOperator 10385 ? 
lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn() 10386 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10387 if (lbaseInfo.getNoReturn() != NoReturn) 10388 allLTypes = false; 10389 if (rbaseInfo.getNoReturn() != NoReturn) 10390 allRTypes = false; 10391 10392 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10393 10394 if (lproto && rproto) { // two C99 style function prototypes 10395 assert((AllowCXX || 10396 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10397 "C++ shouldn't be here"); 10398 // Compatible functions must have the same number of parameters 10399 if (lproto->getNumParams() != rproto->getNumParams()) 10400 return {}; 10401 10402 // Variadic and non-variadic functions aren't compatible 10403 if (lproto->isVariadic() != rproto->isVariadic()) 10404 return {}; 10405 10406 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10407 return {}; 10408 10409 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10410 bool canUseLeft, canUseRight; 10411 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10412 newParamInfos)) 10413 return {}; 10414 10415 if (!canUseLeft) 10416 allLTypes = false; 10417 if (!canUseRight) 10418 allRTypes = false; 10419 10420 // Check parameter type compatibility 10421 SmallVector<QualType, 10> types; 10422 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10423 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10424 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10425 QualType paramType = mergeFunctionParameterTypes( 10426 lParamType, rParamType, OfBlockPointer, Unqualified); 10427 if (paramType.isNull()) 10428 return {}; 10429 10430 if (Unqualified) 10431 paramType = paramType.getUnqualifiedType(); 10432 10433 types.push_back(paramType); 10434 if (Unqualified) { 10435 lParamType = lParamType.getUnqualifiedType(); 10436 rParamType = rParamType.getUnqualifiedType(); 10437 } 10438 10439 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10440 allLTypes = false; 10441 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10442 allRTypes = false; 10443 } 10444 10445 if (allLTypes) return lhs; 10446 if (allRTypes) return rhs; 10447 10448 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10449 EPI.ExtInfo = einfo; 10450 EPI.ExtParameterInfos = 10451 newParamInfos.empty() ? nullptr : newParamInfos.data(); 10452 return getFunctionType(retType, types, EPI); 10453 } 10454 10455 if (lproto) allRTypes = false; 10456 if (rproto) allLTypes = false; 10457 10458 const FunctionProtoType *proto = lproto ? lproto : rproto; 10459 if (proto) { 10460 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10461 if (proto->isVariadic()) 10462 return {}; 10463 // Check that the types are compatible with the types that 10464 // would result from default argument promotions (C99 6.7.5.3p15). 10465 // The only types actually affected are promotable integer 10466 // types and floats, which would be passed as a different 10467 // type depending on whether the prototype is visible. 10468 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10469 QualType paramTy = proto->getParamType(i); 10470 10471 // Look at the converted type of enum types, since that is the type used 10472 // to pass enum values. 
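// (Illustrative example: a parameter declared with 'enum E { A };' is
// examined here through its underlying integer type, often 'unsigned int'
// in C, because that is what a call without a visible prototype would
// actually pass.)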
10473 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10474 paramTy = Enum->getDecl()->getIntegerType(); 10475 if (paramTy.isNull()) 10476 return {}; 10477 } 10478 10479 if (isPromotableIntegerType(paramTy) || 10480 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10481 return {}; 10482 } 10483 10484 if (allLTypes) return lhs; 10485 if (allRTypes) return rhs; 10486 10487 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10488 EPI.ExtInfo = einfo; 10489 return getFunctionType(retType, proto->getParamTypes(), EPI); 10490 } 10491 10492 if (allLTypes) return lhs; 10493 if (allRTypes) return rhs; 10494 return getFunctionNoProtoType(retType, einfo); 10495 } 10496 10497 /// Given that we have an enum type and a non-enum type, try to merge them. 10498 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10499 QualType other, bool isBlockReturnType) { 10500 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10501 // a signed integer type, or an unsigned integer type. 10502 // Compatibility is based on the underlying type, not the promotion 10503 // type. 10504 QualType underlyingType = ET->getDecl()->getIntegerType(); 10505 if (underlyingType.isNull()) 10506 return {}; 10507 if (Context.hasSameType(underlyingType, other)) 10508 return other; 10509 10510 // In block return types, we're more permissive and accept any 10511 // integral type of the same size. 10512 if (isBlockReturnType && other->isIntegerType() && 10513 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10514 return other; 10515 10516 return {}; 10517 } 10518 10519 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, 10520 bool Unqualified, bool BlockReturnType, 10521 bool IsConditionalOperator) { 10522 // For C++ we will not reach this code with reference types (see below), 10523 // for OpenMP variant call overloading we might. 10524 // 10525 // C++ [expr]: If an expression initially has the type "reference to T", the 10526 // type is adjusted to "T" prior to any further analysis, the expression 10527 // designates the object or function denoted by the reference, and the 10528 // expression is an lvalue unless the reference is an rvalue reference and 10529 // the expression is a function call (possibly inside parentheses). 10530 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10531 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10532 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10533 LHS->getTypeClass() == RHS->getTypeClass()) 10534 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10535 OfBlockPointer, Unqualified, BlockReturnType); 10536 if (LHSRefTy || RHSRefTy) 10537 return {}; 10538 10539 if (Unqualified) { 10540 LHS = LHS.getUnqualifiedType(); 10541 RHS = RHS.getUnqualifiedType(); 10542 } 10543 10544 QualType LHSCan = getCanonicalType(LHS), 10545 RHSCan = getCanonicalType(RHS); 10546 10547 // If two types are identical, they are compatible. 10548 if (LHSCan == RHSCan) 10549 return LHS; 10550 10551 // If the qualifiers are different, the types aren't compatible... mostly. 10552 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10553 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10554 if (LQuals != RQuals) { 10555 // If any of these qualifiers are different, we have a type 10556 // mismatch. 
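// (Hypothetical C example: merging 'const int' with plain 'int' stops
// here, since C requires identically qualified types for compatibility.)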
10557 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10558 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10559 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10560 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10561 return {}; 10562 10563 // Exactly one GC qualifier difference is allowed: __strong is 10564 // okay if the other type has no GC qualifier but is an Objective 10565 // C object pointer (i.e. implicitly strong by default). We fix 10566 // this by pretending that the unqualified type was actually 10567 // qualified __strong. 10568 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10569 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10570 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10571 10572 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10573 return {}; 10574 10575 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10576 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10577 } 10578 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10579 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10580 } 10581 return {}; 10582 } 10583 10584 // Okay, qualifiers are equal. 10585 10586 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10587 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10588 10589 // We want to consider the two function types to be the same for these 10590 // comparisons, just force one to the other. 10591 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10592 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10593 10594 // Same as above for arrays 10595 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10596 LHSClass = Type::ConstantArray; 10597 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10598 RHSClass = Type::ConstantArray; 10599 10600 // ObjCInterfaces are just specialized ObjCObjects. 10601 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10602 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10603 10604 // Canonicalize ExtVector -> Vector. 10605 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10606 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10607 10608 // If the canonical type classes don't match. 10609 if (LHSClass != RHSClass) { 10610 // Note that we only have special rules for turning block enum 10611 // returns into block int returns, not vice-versa. 10612 if (const auto *ETy = LHS->getAs<EnumType>()) { 10613 return mergeEnumWithInteger(*this, ETy, RHS, false); 10614 } 10615 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10616 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10617 } 10618 // allow block pointer type to match an 'id' type. 10619 if (OfBlockPointer && !BlockReturnType) { 10620 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10621 return LHS; 10622 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10623 return RHS; 10624 } 10625 // Allow __auto_type to match anything; it merges to the type with more 10626 // information. 10627 if (const auto *AT = LHS->getAs<AutoType>()) { 10628 if (!AT->isDeduced() && AT->isGNUAutoType()) 10629 return RHS; 10630 } 10631 if (const auto *AT = RHS->getAs<AutoType>()) { 10632 if (!AT->isDeduced() && AT->isGNUAutoType()) 10633 return LHS; 10634 } 10635 return {}; 10636 } 10637 10638 // The canonical type classes match. 
10639 switch (LHSClass) { 10640 #define TYPE(Class, Base) 10641 #define ABSTRACT_TYPE(Class, Base) 10642 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10643 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10644 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10645 #include "clang/AST/TypeNodes.inc" 10646 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10647 10648 case Type::Auto: 10649 case Type::DeducedTemplateSpecialization: 10650 case Type::LValueReference: 10651 case Type::RValueReference: 10652 case Type::MemberPointer: 10653 llvm_unreachable("C++ should never be in mergeTypes"); 10654 10655 case Type::ObjCInterface: 10656 case Type::IncompleteArray: 10657 case Type::VariableArray: 10658 case Type::FunctionProto: 10659 case Type::ExtVector: 10660 llvm_unreachable("Types are eliminated above"); 10661 10662 case Type::Pointer: 10663 { 10664 // Merge two pointer types, while trying to preserve typedef info 10665 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10666 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10667 if (Unqualified) { 10668 LHSPointee = LHSPointee.getUnqualifiedType(); 10669 RHSPointee = RHSPointee.getUnqualifiedType(); 10670 } 10671 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10672 Unqualified); 10673 if (ResultType.isNull()) 10674 return {}; 10675 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10676 return LHS; 10677 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10678 return RHS; 10679 return getPointerType(ResultType); 10680 } 10681 case Type::BlockPointer: 10682 { 10683 // Merge two block pointer types, while trying to preserve typedef info 10684 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10685 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10686 if (Unqualified) { 10687 LHSPointee = LHSPointee.getUnqualifiedType(); 10688 RHSPointee = RHSPointee.getUnqualifiedType(); 10689 } 10690 if (getLangOpts().OpenCL) { 10691 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10692 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10693 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10694 // 6.12.5) thus the following check is asymmetric. 
10695 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10696 return {}; 10697 LHSPteeQual.removeAddressSpace(); 10698 RHSPteeQual.removeAddressSpace(); 10699 LHSPointee = 10700 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10701 RHSPointee = 10702 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10703 } 10704 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10705 Unqualified); 10706 if (ResultType.isNull()) 10707 return {}; 10708 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10709 return LHS; 10710 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10711 return RHS; 10712 return getBlockPointerType(ResultType); 10713 } 10714 case Type::Atomic: 10715 { 10716 // Merge two pointer types, while trying to preserve typedef info 10717 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10718 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10719 if (Unqualified) { 10720 LHSValue = LHSValue.getUnqualifiedType(); 10721 RHSValue = RHSValue.getUnqualifiedType(); 10722 } 10723 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10724 Unqualified); 10725 if (ResultType.isNull()) 10726 return {}; 10727 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10728 return LHS; 10729 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10730 return RHS; 10731 return getAtomicType(ResultType); 10732 } 10733 case Type::ConstantArray: 10734 { 10735 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10736 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10737 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10738 return {}; 10739 10740 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10741 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10742 if (Unqualified) { 10743 LHSElem = LHSElem.getUnqualifiedType(); 10744 RHSElem = RHSElem.getUnqualifiedType(); 10745 } 10746 10747 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10748 if (ResultType.isNull()) 10749 return {}; 10750 10751 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10752 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10753 10754 // If either side is a variable array, and both are complete, check whether 10755 // the current dimension is definite. 
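// (Hypothetical shapes: when merging 'int[10]' with a variable-length
// 'int[n]', the lambda below reports a size for the VLA side only if its
// bound folds to an integer constant; two known but unequal bounds make
// the merge fail.)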
10756 if (LVAT || RVAT) { 10757 auto SizeFetch = [this](const VariableArrayType* VAT, 10758 const ConstantArrayType* CAT) 10759 -> std::pair<bool,llvm::APInt> { 10760 if (VAT) { 10761 std::optional<llvm::APSInt> TheInt; 10762 Expr *E = VAT->getSizeExpr(); 10763 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10764 return std::make_pair(true, *TheInt); 10765 return std::make_pair(false, llvm::APSInt()); 10766 } 10767 if (CAT) 10768 return std::make_pair(true, CAT->getSize()); 10769 return std::make_pair(false, llvm::APInt()); 10770 }; 10771 10772 bool HaveLSize, HaveRSize; 10773 llvm::APInt LSize, RSize; 10774 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10775 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10776 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10777 return {}; // Definite, but unequal, array dimension 10778 } 10779 10780 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10781 return LHS; 10782 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10783 return RHS; 10784 if (LCAT) 10785 return getConstantArrayType(ResultType, LCAT->getSize(), 10786 LCAT->getSizeExpr(), ArraySizeModifier(), 0); 10787 if (RCAT) 10788 return getConstantArrayType(ResultType, RCAT->getSize(), 10789 RCAT->getSizeExpr(), ArraySizeModifier(), 0); 10790 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10791 return LHS; 10792 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10793 return RHS; 10794 if (LVAT) { 10795 // FIXME: This isn't correct! But tricky to implement because 10796 // the array's size has to be the size of LHS, but the type 10797 // has to be different. 10798 return LHS; 10799 } 10800 if (RVAT) { 10801 // FIXME: This isn't correct! But tricky to implement because 10802 // the array's size has to be the size of RHS, but the type 10803 // has to be different. 10804 return RHS; 10805 } 10806 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10807 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10808 return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0); 10809 } 10810 case Type::FunctionNoProto: 10811 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 10812 /*AllowCXX=*/false, IsConditionalOperator); 10813 case Type::Record: 10814 case Type::Enum: 10815 return {}; 10816 case Type::Builtin: 10817 // Only exactly equal builtin types are compatible, which is tested above. 10818 return {}; 10819 case Type::Complex: 10820 // Distinct complex types are incompatible. 10821 return {}; 10822 case Type::Vector: 10823 // FIXME: The merged type should be an ExtVector! 10824 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10825 RHSCan->castAs<VectorType>())) 10826 return LHS; 10827 return {}; 10828 case Type::ConstantMatrix: 10829 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10830 RHSCan->castAs<ConstantMatrixType>())) 10831 return LHS; 10832 return {}; 10833 case Type::ObjCObject: { 10834 // Check if the types are assignment compatible. 10835 // FIXME: This should be type compatibility, e.g. whether 10836 // "LHS x; RHS x;" at global scope is legal. 
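  // (Illustrative: canAssignObjCInterfaces below is a one-directional
  // assignability check, e.g. a root-class LHS accepts a derived-class RHS,
  // which is weaker than the mutual compatibility the FIXME above asks for.)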
10837 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10838 RHS->castAs<ObjCObjectType>())) 10839 return LHS; 10840 return {}; 10841 } 10842 case Type::ObjCObjectPointer: 10843 if (OfBlockPointer) { 10844 if (canAssignObjCInterfacesInBlockPointer( 10845 LHS->castAs<ObjCObjectPointerType>(), 10846 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10847 return LHS; 10848 return {}; 10849 } 10850 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10851 RHS->castAs<ObjCObjectPointerType>())) 10852 return LHS; 10853 return {}; 10854 case Type::Pipe: 10855 assert(LHS != RHS && 10856 "Equivalent pipe types should have already been handled!"); 10857 return {}; 10858 case Type::BitInt: { 10859 // Merge two bit-precise int types, while trying to preserve typedef info. 10860 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10861 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10862 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10863 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10864 10865 // Like unsigned/int, shouldn't have a type if they don't match. 10866 if (LHSUnsigned != RHSUnsigned) 10867 return {}; 10868 10869 if (LHSBits != RHSBits) 10870 return {}; 10871 return LHS; 10872 } 10873 } 10874 10875 llvm_unreachable("Invalid Type::Class!"); 10876 } 10877 10878 bool ASTContext::mergeExtParameterInfo( 10879 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10880 bool &CanUseFirst, bool &CanUseSecond, 10881 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10882 assert(NewParamInfos.empty() && "param info list not empty"); 10883 CanUseFirst = CanUseSecond = true; 10884 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10885 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10886 10887 // Fast path: if the first type doesn't have ext parameter infos, 10888 // we match if and only if the second type also doesn't have them. 10889 if (!FirstHasInfo && !SecondHasInfo) 10890 return true; 10891 10892 bool NeedParamInfo = false; 10893 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10894 : SecondFnType->getExtParameterInfos().size(); 10895 10896 for (size_t I = 0; I < E; ++I) { 10897 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10898 if (FirstHasInfo) 10899 FirstParam = FirstFnType->getExtParameterInfo(I); 10900 if (SecondHasInfo) 10901 SecondParam = SecondFnType->getExtParameterInfo(I); 10902 10903 // Cannot merge unless everything except the noescape flag matches. 10904 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10905 return false; 10906 10907 bool FirstNoEscape = FirstParam.isNoEscape(); 10908 bool SecondNoEscape = SecondParam.isNoEscape(); 10909 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10910 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10911 if (NewParamInfos.back().getOpaqueValue()) 10912 NeedParamInfo = true; 10913 if (FirstNoEscape != IsNoEscape) 10914 CanUseFirst = false; 10915 if (SecondNoEscape != IsNoEscape) 10916 CanUseSecond = false; 10917 } 10918 10919 if (!NeedParamInfo) 10920 NewParamInfos.clear(); 10921 10922 return true; 10923 } 10924 10925 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10926 ObjCLayouts[CD] = nullptr; 10927 } 10928 10929 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10930 /// 'RHS' attributes and returns the merged version; including for function 10931 /// return types. 
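/// For example (illustrative): merging 'id' with '__strong id' yields the
/// '__strong'-qualified type, while any '__weak' mismatch makes the two
/// types incompatible.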
10932 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10933 QualType LHSCan = getCanonicalType(LHS), 10934 RHSCan = getCanonicalType(RHS); 10935 // If two types are identical, they are compatible. 10936 if (LHSCan == RHSCan) 10937 return LHS; 10938 if (RHSCan->isFunctionType()) { 10939 if (!LHSCan->isFunctionType()) 10940 return {}; 10941 QualType OldReturnType = 10942 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10943 QualType NewReturnType = 10944 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10945 QualType ResReturnType = 10946 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10947 if (ResReturnType.isNull()) 10948 return {}; 10949 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10950 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10951 // In either case, use OldReturnType to build the new function type. 10952 const auto *F = LHS->castAs<FunctionType>(); 10953 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10954 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10955 EPI.ExtInfo = getFunctionExtInfo(LHS); 10956 QualType ResultType = 10957 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10958 return ResultType; 10959 } 10960 } 10961 return {}; 10962 } 10963 10964 // If the qualifiers are different, the types can still be merged. 10965 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10966 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10967 if (LQuals != RQuals) { 10968 // If any of these qualifiers are different, we have a type mismatch. 10969 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10970 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10971 return {}; 10972 10973 // Exactly one GC qualifier difference is allowed: __strong is 10974 // okay if the other type has no GC qualifier but is an Objective 10975 // C object pointer (i.e. implicitly strong by default). We fix 10976 // this by pretending that the unqualified type was actually 10977 // qualified __strong. 
10978 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10979 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10980 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10981 10982 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10983 return {}; 10984 10985 if (GC_L == Qualifiers::Strong) 10986 return LHS; 10987 if (GC_R == Qualifiers::Strong) 10988 return RHS; 10989 return {}; 10990 } 10991 10992 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10993 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10994 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10995 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10996 if (ResQT == LHSBaseQT) 10997 return LHS; 10998 if (ResQT == RHSBaseQT) 10999 return RHS; 11000 } 11001 return {}; 11002 } 11003 11004 //===----------------------------------------------------------------------===// 11005 // Integer Predicates 11006 //===----------------------------------------------------------------------===// 11007 11008 unsigned ASTContext::getIntWidth(QualType T) const { 11009 if (const auto *ET = T->getAs<EnumType>()) 11010 T = ET->getDecl()->getIntegerType(); 11011 if (T->isBooleanType()) 11012 return 1; 11013 if (const auto *EIT = T->getAs<BitIntType>()) 11014 return EIT->getNumBits(); 11015 // For builtin types, just use the standard type sizing method 11016 return (unsigned)getTypeSize(T); 11017 } 11018 11019 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 11020 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11021 T->isFixedPointType()) && 11022 "Unexpected type"); 11023 11024 // Turn <4 x signed int> -> <4 x unsigned int> 11025 if (const auto *VTy = T->getAs<VectorType>()) 11026 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 11027 VTy->getNumElements(), VTy->getVectorKind()); 11028 11029 // For _BitInt, return an unsigned _BitInt with same width. 11030 if (const auto *EITy = T->getAs<BitIntType>()) 11031 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 11032 11033 // For enums, get the underlying integer type of the enum, and let the general 11034 // integer type signchanging code handle it. 11035 if (const auto *ETy = T->getAs<EnumType>()) 11036 T = ETy->getDecl()->getIntegerType(); 11037 11038 switch (T->castAs<BuiltinType>()->getKind()) { 11039 case BuiltinType::Char_U: 11040 // Plain `char` is mapped to `unsigned char` even if it's already unsigned 11041 case BuiltinType::Char_S: 11042 case BuiltinType::SChar: 11043 case BuiltinType::Char8: 11044 return UnsignedCharTy; 11045 case BuiltinType::Short: 11046 return UnsignedShortTy; 11047 case BuiltinType::Int: 11048 return UnsignedIntTy; 11049 case BuiltinType::Long: 11050 return UnsignedLongTy; 11051 case BuiltinType::LongLong: 11052 return UnsignedLongLongTy; 11053 case BuiltinType::Int128: 11054 return UnsignedInt128Ty; 11055 // wchar_t is special. It is either signed or not, but when it's signed, 11056 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 11057 // version of its underlying type instead. 
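  // (Illustrative: on typical 64-bit Unix targets, where wchar_t is a signed
  // 32-bit type, the result is 'unsigned int'.)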
11058 case BuiltinType::WChar_S: 11059 return getUnsignedWCharType(); 11060 11061 case BuiltinType::ShortAccum: 11062 return UnsignedShortAccumTy; 11063 case BuiltinType::Accum: 11064 return UnsignedAccumTy; 11065 case BuiltinType::LongAccum: 11066 return UnsignedLongAccumTy; 11067 case BuiltinType::SatShortAccum: 11068 return SatUnsignedShortAccumTy; 11069 case BuiltinType::SatAccum: 11070 return SatUnsignedAccumTy; 11071 case BuiltinType::SatLongAccum: 11072 return SatUnsignedLongAccumTy; 11073 case BuiltinType::ShortFract: 11074 return UnsignedShortFractTy; 11075 case BuiltinType::Fract: 11076 return UnsignedFractTy; 11077 case BuiltinType::LongFract: 11078 return UnsignedLongFractTy; 11079 case BuiltinType::SatShortFract: 11080 return SatUnsignedShortFractTy; 11081 case BuiltinType::SatFract: 11082 return SatUnsignedFractTy; 11083 case BuiltinType::SatLongFract: 11084 return SatUnsignedLongFractTy; 11085 default: 11086 assert((T->hasUnsignedIntegerRepresentation() || 11087 T->isUnsignedFixedPointType()) && 11088 "Unexpected signed integer or fixed point type"); 11089 return T; 11090 } 11091 } 11092 11093 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 11094 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11095 T->isFixedPointType()) && 11096 "Unexpected type"); 11097 11098 // Turn <4 x unsigned int> -> <4 x signed int> 11099 if (const auto *VTy = T->getAs<VectorType>()) 11100 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 11101 VTy->getNumElements(), VTy->getVectorKind()); 11102 11103 // For _BitInt, return a signed _BitInt with same width. 11104 if (const auto *EITy = T->getAs<BitIntType>()) 11105 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 11106 11107 // For enums, get the underlying integer type of the enum, and let the general 11108 // integer type signchanging code handle it. 11109 if (const auto *ETy = T->getAs<EnumType>()) 11110 T = ETy->getDecl()->getIntegerType(); 11111 11112 switch (T->castAs<BuiltinType>()->getKind()) { 11113 case BuiltinType::Char_S: 11114 // Plain `char` is mapped to `signed char` even if it's already signed 11115 case BuiltinType::Char_U: 11116 case BuiltinType::UChar: 11117 case BuiltinType::Char8: 11118 return SignedCharTy; 11119 case BuiltinType::UShort: 11120 return ShortTy; 11121 case BuiltinType::UInt: 11122 return IntTy; 11123 case BuiltinType::ULong: 11124 return LongTy; 11125 case BuiltinType::ULongLong: 11126 return LongLongTy; 11127 case BuiltinType::UInt128: 11128 return Int128Ty; 11129 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 11130 // there's no matching "signed wchar_t". Therefore we return the signed 11131 // version of its underlying type instead. 
11132 case BuiltinType::WChar_U: 11133 return getSignedWCharType(); 11134 11135 case BuiltinType::UShortAccum: 11136 return ShortAccumTy; 11137 case BuiltinType::UAccum: 11138 return AccumTy; 11139 case BuiltinType::ULongAccum: 11140 return LongAccumTy; 11141 case BuiltinType::SatUShortAccum: 11142 return SatShortAccumTy; 11143 case BuiltinType::SatUAccum: 11144 return SatAccumTy; 11145 case BuiltinType::SatULongAccum: 11146 return SatLongAccumTy; 11147 case BuiltinType::UShortFract: 11148 return ShortFractTy; 11149 case BuiltinType::UFract: 11150 return FractTy; 11151 case BuiltinType::ULongFract: 11152 return LongFractTy; 11153 case BuiltinType::SatUShortFract: 11154 return SatShortFractTy; 11155 case BuiltinType::SatUFract: 11156 return SatFractTy; 11157 case BuiltinType::SatULongFract: 11158 return SatLongFractTy; 11159 default: 11160 assert( 11161 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 11162 "Unexpected signed integer or fixed point type"); 11163 return T; 11164 } 11165 } 11166 11167 ASTMutationListener::~ASTMutationListener() = default; 11168 11169 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 11170 QualType ReturnType) {} 11171 11172 //===----------------------------------------------------------------------===// 11173 // Builtin Type Computation 11174 //===----------------------------------------------------------------------===// 11175 11176 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 11177 /// pointer over the consumed characters. This returns the resultant type. If 11178 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 11179 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 11180 /// a vector of "i*". 11181 /// 11182 /// RequiresICE is filled in on return to indicate whether the value is required 11183 /// to be an Integer Constant Expression. 11184 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 11185 ASTContext::GetBuiltinTypeError &Error, 11186 bool &RequiresICE, 11187 bool AllowTypeModifiers) { 11188 // Modifiers. 11189 int HowLong = 0; 11190 bool Signed = false, Unsigned = false; 11191 RequiresICE = false; 11192 11193 // Read the prefixed modifiers first. 11194 bool Done = false; 11195 #ifndef NDEBUG 11196 bool IsSpecial = false; 11197 #endif 11198 while (!Done) { 11199 switch (*Str++) { 11200 default: Done = true; --Str; break; 11201 case 'I': 11202 RequiresICE = true; 11203 break; 11204 case 'S': 11205 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 11206 assert(!Signed && "Can't use 'S' modifier multiple times!"); 11207 Signed = true; 11208 break; 11209 case 'U': 11210 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 11211 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 11212 Unsigned = true; 11213 break; 11214 case 'L': 11215 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 11216 assert(HowLong <= 2 && "Can't have LLLL modifier"); 11217 ++HowLong; 11218 break; 11219 case 'N': 11220 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 11221 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11222 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 11223 #ifndef NDEBUG 11224 IsSpecial = true; 11225 #endif 11226 if (Context.getTargetInfo().getLongWidth() == 32) 11227 ++HowLong; 11228 break; 11229 case 'W': 11230 // This modifier represents int64 type. 
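    // (Illustrative: int64_t is 'long' on LP64 targets, giving HowLong = 1,
    // but 'long long' on LLP64 targets such as 64-bit Windows, giving
    // HowLong = 2.)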
11231 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11232 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 11233 #ifndef NDEBUG 11234 IsSpecial = true; 11235 #endif 11236 switch (Context.getTargetInfo().getInt64Type()) { 11237 default: 11238 llvm_unreachable("Unexpected integer type"); 11239 case TargetInfo::SignedLong: 11240 HowLong = 1; 11241 break; 11242 case TargetInfo::SignedLongLong: 11243 HowLong = 2; 11244 break; 11245 } 11246 break; 11247 case 'Z': 11248 // This modifier represents int32 type. 11249 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11250 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 11251 #ifndef NDEBUG 11252 IsSpecial = true; 11253 #endif 11254 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 11255 default: 11256 llvm_unreachable("Unexpected integer type"); 11257 case TargetInfo::SignedInt: 11258 HowLong = 0; 11259 break; 11260 case TargetInfo::SignedLong: 11261 HowLong = 1; 11262 break; 11263 case TargetInfo::SignedLongLong: 11264 HowLong = 2; 11265 break; 11266 } 11267 break; 11268 case 'O': 11269 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11270 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 11271 #ifndef NDEBUG 11272 IsSpecial = true; 11273 #endif 11274 if (Context.getLangOpts().OpenCL) 11275 HowLong = 1; 11276 else 11277 HowLong = 2; 11278 break; 11279 } 11280 } 11281 11282 QualType Type; 11283 11284 // Read the base type. 11285 switch (*Str++) { 11286 default: llvm_unreachable("Unknown builtin type letter!"); 11287 case 'x': 11288 assert(HowLong == 0 && !Signed && !Unsigned && 11289 "Bad modifiers used with 'x'!"); 11290 Type = Context.Float16Ty; 11291 break; 11292 case 'y': 11293 assert(HowLong == 0 && !Signed && !Unsigned && 11294 "Bad modifiers used with 'y'!"); 11295 Type = Context.BFloat16Ty; 11296 break; 11297 case 'v': 11298 assert(HowLong == 0 && !Signed && !Unsigned && 11299 "Bad modifiers used with 'v'!"); 11300 Type = Context.VoidTy; 11301 break; 11302 case 'h': 11303 assert(HowLong == 0 && !Signed && !Unsigned && 11304 "Bad modifiers used with 'h'!"); 11305 Type = Context.HalfTy; 11306 break; 11307 case 'f': 11308 assert(HowLong == 0 && !Signed && !Unsigned && 11309 "Bad modifiers used with 'f'!"); 11310 Type = Context.FloatTy; 11311 break; 11312 case 'd': 11313 assert(HowLong < 3 && !Signed && !Unsigned && 11314 "Bad modifiers used with 'd'!"); 11315 if (HowLong == 1) 11316 Type = Context.LongDoubleTy; 11317 else if (HowLong == 2) 11318 Type = Context.Float128Ty; 11319 else 11320 Type = Context.DoubleTy; 11321 break; 11322 case 's': 11323 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11324 if (Unsigned) 11325 Type = Context.UnsignedShortTy; 11326 else 11327 Type = Context.ShortTy; 11328 break; 11329 case 'i': 11330 if (HowLong == 3) 11331 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11332 else if (HowLong == 2) 11333 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11334 else if (HowLong == 1) 11335 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11336 else 11337 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11338 break; 11339 case 'c': 11340 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11341 if (Signed) 11342 Type = Context.SignedCharTy; 11343 else if (Unsigned) 11344 Type = Context.UnsignedCharTy; 11345 else 11346 Type = Context.CharTy; 11347 break; 11348 case 'b': // boolean 11349 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11350 Type = Context.BoolTy; 11351 break; 11352 case 'z': // size_t. 11353 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11354 Type = Context.getSizeType(); 11355 break; 11356 case 'w': // wchar_t. 11357 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11358 Type = Context.getWideCharType(); 11359 break; 11360 case 'F': 11361 Type = Context.getCFConstantStringType(); 11362 break; 11363 case 'G': 11364 Type = Context.getObjCIdType(); 11365 break; 11366 case 'H': 11367 Type = Context.getObjCSelType(); 11368 break; 11369 case 'M': 11370 Type = Context.getObjCSuperType(); 11371 break; 11372 case 'a': 11373 Type = Context.getBuiltinVaListType(); 11374 assert(!Type.isNull() && "builtin va list type not initialized!"); 11375 break; 11376 case 'A': 11377 // This is a "reference" to a va_list; however, what exactly 11378 // this means depends on how va_list is defined. There are two 11379 // different kinds of va_list: ones passed by value, and ones 11380 // passed by reference. An example of a by-value va_list is 11381 // x86, where va_list is a char*. An example of by-ref va_list 11382 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11383 // we want this argument to be a char*&; for x86-64, we want 11384 // it to be a __va_list_tag*. 11385 Type = Context.getBuiltinVaListType(); 11386 assert(!Type.isNull() && "builtin va list type not initialized!"); 11387 if (Type->isArrayType()) 11388 Type = Context.getArrayDecayedType(Type); 11389 else 11390 Type = Context.getLValueReferenceType(Type); 11391 break; 11392 case 'q': { 11393 char *End; 11394 unsigned NumElements = strtoul(Str, &End, 10); 11395 assert(End != Str && "Missing vector size"); 11396 Str = End; 11397 11398 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11399 RequiresICE, false); 11400 assert(!RequiresICE && "Can't require vector ICE"); 11401 11402 Type = Context.getScalableVectorType(ElementType, NumElements); 11403 break; 11404 } 11405 case 'Q': { 11406 switch (*Str++) { 11407 case 'a': { 11408 Type = Context.SveCountTy; 11409 break; 11410 } 11411 default: 11412 llvm_unreachable("Unexpected target builtin type"); 11413 } 11414 break; 11415 } 11416 case 'V': { 11417 char *End; 11418 unsigned NumElements = strtoul(Str, &End, 10); 11419 assert(End != Str && "Missing vector size"); 11420 Str = End; 11421 11422 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11423 RequiresICE, false); 11424 assert(!RequiresICE && "Can't require vector ICE"); 11425 11426 // TODO: No way to make AltiVec vectors in builtins yet. 
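    // (Illustrative: a descriptor such as "V2i" was parsed above as the
    // element count 2 followed by the element type 'i', producing a generic
    // vector of two ints.)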
11427 Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic); 11428 break; 11429 } 11430 case 'E': { 11431 char *End; 11432 11433 unsigned NumElements = strtoul(Str, &End, 10); 11434 assert(End != Str && "Missing vector size"); 11435 11436 Str = End; 11437 11438 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11439 false); 11440 Type = Context.getExtVectorType(ElementType, NumElements); 11441 break; 11442 } 11443 case 'X': { 11444 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11445 false); 11446 assert(!RequiresICE && "Can't require complex ICE"); 11447 Type = Context.getComplexType(ElementType); 11448 break; 11449 } 11450 case 'Y': 11451 Type = Context.getPointerDiffType(); 11452 break; 11453 case 'P': 11454 Type = Context.getFILEType(); 11455 if (Type.isNull()) { 11456 Error = ASTContext::GE_Missing_stdio; 11457 return {}; 11458 } 11459 break; 11460 case 'J': 11461 if (Signed) 11462 Type = Context.getsigjmp_bufType(); 11463 else 11464 Type = Context.getjmp_bufType(); 11465 11466 if (Type.isNull()) { 11467 Error = ASTContext::GE_Missing_setjmp; 11468 return {}; 11469 } 11470 break; 11471 case 'K': 11472 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11473 Type = Context.getucontext_tType(); 11474 11475 if (Type.isNull()) { 11476 Error = ASTContext::GE_Missing_ucontext; 11477 return {}; 11478 } 11479 break; 11480 case 'p': 11481 Type = Context.getProcessIDType(); 11482 break; 11483 } 11484 11485 // If there are modifiers and if we're allowed to parse them, go for it. 11486 Done = !AllowTypeModifiers; 11487 while (!Done) { 11488 switch (char c = *Str++) { 11489 default: Done = true; --Str; break; 11490 case '*': 11491 case '&': { 11492 // Both pointers and references can have their pointee types 11493 // qualified with an address space. 11494 char *End; 11495 unsigned AddrSpace = strtoul(Str, &End, 10); 11496 if (End != Str) { 11497 // Note AddrSpace == 0 is not the same as an unspecified address space. 11498 Type = Context.getAddrSpaceQualType( 11499 Type, 11500 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11501 Str = End; 11502 } 11503 if (c == '*') 11504 Type = Context.getPointerType(Type); 11505 else 11506 Type = Context.getLValueReferenceType(Type); 11507 break; 11508 } 11509 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11510 case 'C': 11511 Type = Type.withConst(); 11512 break; 11513 case 'D': 11514 Type = Context.getVolatileType(Type); 11515 break; 11516 case 'R': 11517 Type = Type.withRestrict(); 11518 break; 11519 } 11520 } 11521 11522 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11523 "Integer constant 'I' type must be an integer"); 11524 11525 return Type; 11526 } 11527 11528 // On some targets such as PowerPC, some of the builtins are defined with custom 11529 // type descriptors for target-dependent types. These descriptors are decoded in 11530 // other functions, but it may be useful to be able to fall back to default 11531 // descriptor decoding to define builtins mixing target-dependent and target- 11532 // independent types. This function allows decoding one type descriptor with 11533 // default decoding. 
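// For example (illustrative), a PowerPC builtin that mixes a target-specific
// vector-pair descriptor with ordinary descriptors can decode the former
// itself and defer the remaining, target-independent ones to DecodeTypeStr.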
11534 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11535 GetBuiltinTypeError &Error, bool &RequireICE, 11536 bool AllowTypeModifiers) const { 11537 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11538 } 11539 11540 /// GetBuiltinType - Return the type for the specified builtin. 11541 QualType ASTContext::GetBuiltinType(unsigned Id, 11542 GetBuiltinTypeError &Error, 11543 unsigned *IntegerConstantArgs) const { 11544 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11545 if (TypeStr[0] == '\0') { 11546 Error = GE_Missing_type; 11547 return {}; 11548 } 11549 11550 SmallVector<QualType, 8> ArgTypes; 11551 11552 bool RequiresICE = false; 11553 Error = GE_None; 11554 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11555 RequiresICE, true); 11556 if (Error != GE_None) 11557 return {}; 11558 11559 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11560 11561 while (TypeStr[0] && TypeStr[0] != '.') { 11562 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11563 if (Error != GE_None) 11564 return {}; 11565 11566 // If this argument is required to be an IntegerConstantExpression and the 11567 // caller cares, fill in the bitmask we return. 11568 if (RequiresICE && IntegerConstantArgs) 11569 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11570 11571 // Do array -> pointer decay. The builtin should use the decayed type. 11572 if (Ty->isArrayType()) 11573 Ty = getArrayDecayedType(Ty); 11574 11575 ArgTypes.push_back(Ty); 11576 } 11577 11578 if (Id == Builtin::BI__GetExceptionInfo) 11579 return {}; 11580 11581 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11582 "'.' should only occur at end of builtin type list!"); 11583 11584 bool Variadic = (TypeStr[0] == '.'); 11585 11586 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11587 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11588 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11589 11590 11591 // We really shouldn't be making a no-proto type here. 11592 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11593 return getFunctionNoProtoType(ResType, EI); 11594 11595 FunctionProtoType::ExtProtoInfo EPI; 11596 EPI.ExtInfo = EI; 11597 EPI.Variadic = Variadic; 11598 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11599 EPI.ExceptionSpec.Type = 11600 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11601 11602 return getFunctionType(ResType, ArgTypes, EPI); 11603 } 11604 11605 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11606 const FunctionDecl *FD) { 11607 if (!FD->isExternallyVisible()) 11608 return GVA_Internal; 11609 11610 // Non-user-provided functions get emitted as weak definitions with every 11611 // use, no matter whether they've been explicitly instantiated etc. 
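  // (E.g. implicitly-defined special members, or members explicitly defaulted
  // on their first declaration, are not "user-provided".)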
11612 if (!FD->isUserProvided()) 11613 return GVA_DiscardableODR; 11614 11615 GVALinkage External; 11616 switch (FD->getTemplateSpecializationKind()) { 11617 case TSK_Undeclared: 11618 case TSK_ExplicitSpecialization: 11619 External = GVA_StrongExternal; 11620 break; 11621 11622 case TSK_ExplicitInstantiationDefinition: 11623 return GVA_StrongODR; 11624 11625 // C++11 [temp.explicit]p10: 11626 // [ Note: The intent is that an inline function that is the subject of 11627 // an explicit instantiation declaration will still be implicitly 11628 // instantiated when used so that the body can be considered for 11629 // inlining, but that no out-of-line copy of the inline function would be 11630 // generated in the translation unit. -- end note ] 11631 case TSK_ExplicitInstantiationDeclaration: 11632 return GVA_AvailableExternally; 11633 11634 case TSK_ImplicitInstantiation: 11635 External = GVA_DiscardableODR; 11636 break; 11637 } 11638 11639 if (!FD->isInlined()) 11640 return External; 11641 11642 if ((!Context.getLangOpts().CPlusPlus && 11643 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11644 !FD->hasAttr<DLLExportAttr>()) || 11645 FD->hasAttr<GNUInlineAttr>()) { 11646 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11647 11648 // GNU or C99 inline semantics. Determine whether this symbol should be 11649 // externally visible. 11650 if (FD->isInlineDefinitionExternallyVisible()) 11651 return External; 11652 11653 // C99 inline semantics, where the symbol is not externally visible. 11654 return GVA_AvailableExternally; 11655 } 11656 11657 // Functions specified with extern and inline in -fms-compatibility mode 11658 // forcibly get emitted. While the body of the function cannot be later 11659 // replaced, the function definition cannot be discarded. 11660 if (FD->isMSExternInline()) 11661 return GVA_StrongODR; 11662 11663 if (Context.getTargetInfo().getCXXABI().isMicrosoft() && 11664 isa<CXXConstructorDecl>(FD) && 11665 cast<CXXConstructorDecl>(FD)->isInheritingConstructor()) 11666 // Our approach to inheriting constructors is fundamentally different from 11667 // that used by the MS ABI, so keep our inheriting constructor thunks 11668 // internal rather than trying to pick an unambiguous mangling for them. 11669 return GVA_Internal; 11670 11671 return GVA_DiscardableODR; 11672 } 11673 11674 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11675 const Decl *D, GVALinkage L) { 11676 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11677 // dllexport/dllimport on inline functions. 11678 if (D->hasAttr<DLLImportAttr>()) { 11679 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11680 return GVA_AvailableExternally; 11681 } else if (D->hasAttr<DLLExportAttr>()) { 11682 if (L == GVA_DiscardableODR) 11683 return GVA_StrongODR; 11684 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11685 // Device-side functions with __global__ attribute must always be 11686 // visible externally so they can be launched from host. 11687 if (D->hasAttr<CUDAGlobalAttr>() && 11688 (L == GVA_DiscardableODR || L == GVA_Internal)) 11689 return GVA_StrongODR; 11690 // Single source offloading languages like CUDA/HIP need to be able to 11691 // access static device variables from host code of the same compilation 11692 // unit. 
This is done by externalizing the static variable with a shared 11693 // name between the host and device compilation which is the same for the 11694 // same compilation unit whereas different among different compilation 11695 // units. 11696 if (Context.shouldExternalize(D)) 11697 return GVA_StrongExternal; 11698 } 11699 return L; 11700 } 11701 11702 /// Adjust the GVALinkage for a declaration based on what an external AST source 11703 /// knows about whether there can be other definitions of this declaration. 11704 static GVALinkage 11705 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11706 GVALinkage L) { 11707 ExternalASTSource *Source = Ctx.getExternalSource(); 11708 if (!Source) 11709 return L; 11710 11711 switch (Source->hasExternalDefinitions(D)) { 11712 case ExternalASTSource::EK_Never: 11713 // Other translation units rely on us to provide the definition. 11714 if (L == GVA_DiscardableODR) 11715 return GVA_StrongODR; 11716 break; 11717 11718 case ExternalASTSource::EK_Always: 11719 return GVA_AvailableExternally; 11720 11721 case ExternalASTSource::EK_ReplyHazy: 11722 break; 11723 } 11724 return L; 11725 } 11726 11727 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11728 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11729 adjustGVALinkageForAttributes(*this, FD, 11730 basicGVALinkageForFunction(*this, FD))); 11731 } 11732 11733 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11734 const VarDecl *VD) { 11735 // As an extension for interactive REPLs, make sure constant variables are 11736 // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl 11737 // marking them as internal. 11738 if (Context.getLangOpts().CPlusPlus && 11739 Context.getLangOpts().IncrementalExtensions && 11740 VD->getType().isConstQualified() && 11741 !VD->getType().isVolatileQualified() && !VD->isInline() && 11742 !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate()) 11743 return GVA_DiscardableODR; 11744 11745 if (!VD->isExternallyVisible()) 11746 return GVA_Internal; 11747 11748 if (VD->isStaticLocal()) { 11749 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11750 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11751 LexicalContext = LexicalContext->getLexicalParent(); 11752 11753 // ObjC Blocks can create local variables that don't have a FunctionDecl 11754 // LexicalContext. 11755 if (!LexicalContext) 11756 return GVA_DiscardableODR; 11757 11758 // Otherwise, let the static local variable inherit its linkage from the 11759 // nearest enclosing function. 11760 auto StaticLocalLinkage = 11761 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11762 11763 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11764 // be emitted in any object with references to the symbol for the object it 11765 // contains, whether inline or out-of-line." 11766 // Similar behavior is observed with MSVC. An alternative ABI could use 11767 // StrongODR/AvailableExternally to match the function, but none are 11768 // known/supported currently. 11769 if (StaticLocalLinkage == GVA_StrongODR || 11770 StaticLocalLinkage == GVA_AvailableExternally) 11771 return GVA_DiscardableODR; 11772 return StaticLocalLinkage; 11773 } 11774 11775 // MSVC treats in-class initialized static data members as definitions. 11776 // By giving them non-strong linkage, out-of-line definitions won't 11777 // cause link errors. 
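  // (Illustrative: for 'struct S { static const int x = 1; };', MSVC treats
  // the in-class initializer as a definition, so a separate out-of-line
  // 'const int S::x;' must not produce a second strong symbol.)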
11778 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11779 return GVA_DiscardableODR; 11780 11781 // Most non-template variables have strong linkage; inline variables are 11782 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11783 GVALinkage StrongLinkage; 11784 switch (Context.getInlineVariableDefinitionKind(VD)) { 11785 case ASTContext::InlineVariableDefinitionKind::None: 11786 StrongLinkage = GVA_StrongExternal; 11787 break; 11788 case ASTContext::InlineVariableDefinitionKind::Weak: 11789 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11790 StrongLinkage = GVA_DiscardableODR; 11791 break; 11792 case ASTContext::InlineVariableDefinitionKind::Strong: 11793 StrongLinkage = GVA_StrongODR; 11794 break; 11795 } 11796 11797 switch (VD->getTemplateSpecializationKind()) { 11798 case TSK_Undeclared: 11799 return StrongLinkage; 11800 11801 case TSK_ExplicitSpecialization: 11802 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11803 VD->isStaticDataMember() 11804 ? GVA_StrongODR 11805 : StrongLinkage; 11806 11807 case TSK_ExplicitInstantiationDefinition: 11808 return GVA_StrongODR; 11809 11810 case TSK_ExplicitInstantiationDeclaration: 11811 return GVA_AvailableExternally; 11812 11813 case TSK_ImplicitInstantiation: 11814 return GVA_DiscardableODR; 11815 } 11816 11817 llvm_unreachable("Invalid Linkage!"); 11818 } 11819 11820 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const { 11821 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11822 adjustGVALinkageForAttributes(*this, VD, 11823 basicGVALinkageForVariable(*this, VD))); 11824 } 11825 11826 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11827 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11828 if (!VD->isFileVarDecl()) 11829 return false; 11830 // Global named register variables (GNU extension) are never emitted. 11831 if (VD->getStorageClass() == SC_Register) 11832 return false; 11833 if (VD->getDescribedVarTemplate() || 11834 isa<VarTemplatePartialSpecializationDecl>(VD)) 11835 return false; 11836 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11837 // We never need to emit an uninstantiated function template. 11838 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11839 return false; 11840 } else if (isa<PragmaCommentDecl>(D)) 11841 return true; 11842 else if (isa<PragmaDetectMismatchDecl>(D)) 11843 return true; 11844 else if (isa<OMPRequiresDecl>(D)) 11845 return true; 11846 else if (isa<OMPThreadPrivateDecl>(D)) 11847 return !D->getDeclContext()->isDependentContext(); 11848 else if (isa<OMPAllocateDecl>(D)) 11849 return !D->getDeclContext()->isDependentContext(); 11850 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11851 return !D->getDeclContext()->isDependentContext(); 11852 else if (isa<ImportDecl>(D)) 11853 return true; 11854 else 11855 return false; 11856 11857 // If this is a member of a class template, we do not need to emit it. 11858 if (D->getDeclContext()->isDependentContext()) 11859 return false; 11860 11861 // Weak references don't produce any output by themselves. 11862 if (D->hasAttr<WeakRefAttr>()) 11863 return false; 11864 11865 // Aliases and used decls are required. 11866 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11867 return true; 11868 11869 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11870 // Forward declarations aren't required. 
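    // (Roughly: a declaration without a body can still oblige this TU to emit
    // an externally visible definition, e.g. under C99 inline semantics; that
    // is what the call below checks.)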
11871 if (!FD->doesThisDeclarationHaveABody()) 11872 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11873 11874 // Constructors and destructors are required. 11875 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11876 return true; 11877 11878 // The key function for a class is required. This rule only comes 11879 // into play when inline functions can be key functions, though. 11880 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11881 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11882 const CXXRecordDecl *RD = MD->getParent(); 11883 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11884 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11885 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11886 return true; 11887 } 11888 } 11889 } 11890 11891 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11892 11893 // static, static inline, always_inline, and extern inline functions can 11894 // always be deferred. Normal inline functions can be deferred in C99/C++. 11895 // Implicit template instantiations can also be deferred in C++. 11896 return !isDiscardableGVALinkage(Linkage); 11897 } 11898 11899 const auto *VD = cast<VarDecl>(D); 11900 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11901 11902 // If the decl is marked as `declare target to`, it should be emitted for the 11903 // host and for the device. 11904 if (LangOpts.OpenMP && 11905 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11906 return true; 11907 11908 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11909 !isMSStaticDataMemberInlineDefinition(VD)) 11910 return false; 11911 11912 // Variables in other module units shouldn't be forced to be emitted. 11913 if (VD->isInAnotherModuleUnit()) 11914 return false; 11915 11916 // Variables that can be needed in other TUs are required. 11917 auto Linkage = GetGVALinkageForVariable(VD); 11918 if (!isDiscardableGVALinkage(Linkage)) 11919 return true; 11920 11921 // We never need to emit a variable that is available in another TU. 11922 if (Linkage == GVA_AvailableExternally) 11923 return false; 11924 11925 // Variables that have destruction with side-effects are required. 11926 if (VD->needsDestruction(*this)) 11927 return true; 11928 11929 // Variables that have initialization with side-effects are required. 11930 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11931 // We can get a value-dependent initializer during error recovery. 11932 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11933 return true; 11934 11935 // Likewise, variables with tuple-like bindings are required if their 11936 // bindings have side-effects. 11937 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11938 for (const auto *BD : DD->bindings()) 11939 if (const auto *BindingVD = BD->getHoldingVar()) 11940 if (DeclMustBeEmitted(BindingVD)) 11941 return true; 11942 11943 return false; 11944 } 11945 11946 void ASTContext::forEachMultiversionedFunctionVersion( 11947 const FunctionDecl *FD, 11948 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11949 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11950 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11951 FD = FD->getMostRecentDecl(); 11952 // FIXME: The order of traversal here matters and depends on the order of 11953 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11954 // shouldn't rely on that. 
11955 for (auto *CurDecl : 11956 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11957 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11958 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11959 !SeenDecls.contains(CurFD)) { 11960 SeenDecls.insert(CurFD); 11961 Pred(CurFD); 11962 } 11963 } 11964 } 11965 11966 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11967 bool IsCXXMethod, 11968 bool IsBuiltin) const { 11969 // Pass through to the C++ ABI object 11970 if (IsCXXMethod) 11971 return ABI->getDefaultMethodCallConv(IsVariadic); 11972 11973 // Builtins ignore user-specified default calling convention and remain the 11974 // Target's default calling convention. 11975 if (!IsBuiltin) { 11976 switch (LangOpts.getDefaultCallingConv()) { 11977 case LangOptions::DCC_None: 11978 break; 11979 case LangOptions::DCC_CDecl: 11980 return CC_C; 11981 case LangOptions::DCC_FastCall: 11982 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11983 return CC_X86FastCall; 11984 break; 11985 case LangOptions::DCC_StdCall: 11986 if (!IsVariadic) 11987 return CC_X86StdCall; 11988 break; 11989 case LangOptions::DCC_VectorCall: 11990 // __vectorcall cannot be applied to variadic functions. 11991 if (!IsVariadic) 11992 return CC_X86VectorCall; 11993 break; 11994 case LangOptions::DCC_RegCall: 11995 // __regcall cannot be applied to variadic functions. 11996 if (!IsVariadic) 11997 return CC_X86RegCall; 11998 break; 11999 case LangOptions::DCC_RtdCall: 12000 if (!IsVariadic) 12001 return CC_M68kRTD; 12002 break; 12003 } 12004 } 12005 return Target->getDefaultCallingConv(); 12006 } 12007 12008 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 12009 // Pass through to the C++ ABI object 12010 return ABI->isNearlyEmpty(RD); 12011 } 12012 12013 VTableContextBase *ASTContext::getVTableContext() { 12014 if (!VTContext.get()) { 12015 auto ABI = Target->getCXXABI(); 12016 if (ABI.isMicrosoft()) 12017 VTContext.reset(new MicrosoftVTableContext(*this)); 12018 else { 12019 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 12020 ? 
ItaniumVTableContext::Relative 12021 : ItaniumVTableContext::Pointer; 12022 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 12023 } 12024 } 12025 return VTContext.get(); 12026 } 12027 12028 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 12029 if (!T) 12030 T = Target; 12031 switch (T->getCXXABI().getKind()) { 12032 case TargetCXXABI::AppleARM64: 12033 case TargetCXXABI::Fuchsia: 12034 case TargetCXXABI::GenericAArch64: 12035 case TargetCXXABI::GenericItanium: 12036 case TargetCXXABI::GenericARM: 12037 case TargetCXXABI::GenericMIPS: 12038 case TargetCXXABI::iOS: 12039 case TargetCXXABI::WebAssembly: 12040 case TargetCXXABI::WatchOS: 12041 case TargetCXXABI::XL: 12042 return ItaniumMangleContext::create(*this, getDiagnostics()); 12043 case TargetCXXABI::Microsoft: 12044 return MicrosoftMangleContext::create(*this, getDiagnostics()); 12045 } 12046 llvm_unreachable("Unsupported ABI"); 12047 } 12048 12049 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 12050 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 12051 "Device mangle context does not support Microsoft mangling."); 12052 switch (T.getCXXABI().getKind()) { 12053 case TargetCXXABI::AppleARM64: 12054 case TargetCXXABI::Fuchsia: 12055 case TargetCXXABI::GenericAArch64: 12056 case TargetCXXABI::GenericItanium: 12057 case TargetCXXABI::GenericARM: 12058 case TargetCXXABI::GenericMIPS: 12059 case TargetCXXABI::iOS: 12060 case TargetCXXABI::WebAssembly: 12061 case TargetCXXABI::WatchOS: 12062 case TargetCXXABI::XL: 12063 return ItaniumMangleContext::create( 12064 *this, getDiagnostics(), 12065 [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> { 12066 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 12067 return RD->getDeviceLambdaManglingNumber(); 12068 return std::nullopt; 12069 }, 12070 /*IsAux=*/true); 12071 case TargetCXXABI::Microsoft: 12072 return MicrosoftMangleContext::create(*this, getDiagnostics(), 12073 /*IsAux=*/true); 12074 } 12075 llvm_unreachable("Unsupported ABI"); 12076 } 12077 12078 CXXABI::~CXXABI() = default; 12079 12080 size_t ASTContext::getSideTableAllocatedMemory() const { 12081 return ASTRecordLayouts.getMemorySize() + 12082 llvm::capacity_in_bytes(ObjCLayouts) + 12083 llvm::capacity_in_bytes(KeyFunctions) + 12084 llvm::capacity_in_bytes(ObjCImpls) + 12085 llvm::capacity_in_bytes(BlockVarCopyInits) + 12086 llvm::capacity_in_bytes(DeclAttrs) + 12087 llvm::capacity_in_bytes(TemplateOrInstantiation) + 12088 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 12089 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 12090 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 12091 llvm::capacity_in_bytes(OverriddenMethods) + 12092 llvm::capacity_in_bytes(Types) + 12093 llvm::capacity_in_bytes(VariableArrayTypes); 12094 } 12095 12096 /// getIntTypeForBitwidth - 12097 /// sets integer QualTy according to specified details: 12098 /// bitwidth, signed/unsigned. 12099 /// Returns empty type if there is no appropriate target types. 12100 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, 12101 unsigned Signed) const { 12102 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); 12103 CanQualType QualTy = getFromTargetType(Ty); 12104 if (!QualTy && DestWidth == 128) 12105 return Signed ? Int128Ty : UnsignedInt128Ty; 12106 return QualTy; 12107 } 12108 12109 /// getRealTypeForBitwidth - 12110 /// sets floating point QualTy according to specified bitwidth. 
12111 /// Returns empty type if there is no appropriate target types. 12112 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, 12113 FloatModeKind ExplicitType) const { 12114 FloatModeKind Ty = 12115 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType); 12116 switch (Ty) { 12117 case FloatModeKind::Half: 12118 return HalfTy; 12119 case FloatModeKind::Float: 12120 return FloatTy; 12121 case FloatModeKind::Double: 12122 return DoubleTy; 12123 case FloatModeKind::LongDouble: 12124 return LongDoubleTy; 12125 case FloatModeKind::Float128: 12126 return Float128Ty; 12127 case FloatModeKind::Ibm128: 12128 return Ibm128Ty; 12129 case FloatModeKind::NoFloat: 12130 return {}; 12131 } 12132 12133 llvm_unreachable("Unhandled TargetInfo::RealType value"); 12134 } 12135 12136 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { 12137 if (Number > 1) 12138 MangleNumbers[ND] = Number; 12139 } 12140 12141 unsigned ASTContext::getManglingNumber(const NamedDecl *ND, 12142 bool ForAuxTarget) const { 12143 auto I = MangleNumbers.find(ND); 12144 unsigned Res = I != MangleNumbers.end() ? I->second : 1; 12145 // CUDA/HIP host compilation encodes host and device mangling numbers 12146 // as lower and upper half of 32 bit integer. 12147 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) { 12148 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF; 12149 } else { 12150 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling " 12151 "number for aux target"); 12152 } 12153 return Res > 1 ? Res : 1; 12154 } 12155 12156 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { 12157 if (Number > 1) 12158 StaticLocalNumbers[VD] = Number; 12159 } 12160 12161 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { 12162 auto I = StaticLocalNumbers.find(VD); 12163 return I != StaticLocalNumbers.end() ? I->second : 1; 12164 } 12165 12166 MangleNumberingContext & 12167 ASTContext::getManglingNumberContext(const DeclContext *DC) { 12168 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 12169 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC]; 12170 if (!MCtx) 12171 MCtx = createMangleNumberingContext(); 12172 return *MCtx; 12173 } 12174 12175 MangleNumberingContext & 12176 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) { 12177 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 
12178 std::unique_ptr<MangleNumberingContext> &MCtx = 12179 ExtraMangleNumberingContexts[D]; 12180 if (!MCtx) 12181 MCtx = createMangleNumberingContext(); 12182 return *MCtx; 12183 } 12184 12185 std::unique_ptr<MangleNumberingContext> 12186 ASTContext::createMangleNumberingContext() const { 12187 return ABI->createMangleNumberingContext(); 12188 } 12189 12190 const CXXConstructorDecl * 12191 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 12192 return ABI->getCopyConstructorForExceptionObject( 12193 cast<CXXRecordDecl>(RD->getFirstDecl())); 12194 } 12195 12196 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 12197 CXXConstructorDecl *CD) { 12198 return ABI->addCopyConstructorForExceptionObject( 12199 cast<CXXRecordDecl>(RD->getFirstDecl()), 12200 cast<CXXConstructorDecl>(CD->getFirstDecl())); 12201 } 12202 12203 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 12204 TypedefNameDecl *DD) { 12205 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 12206 } 12207 12208 TypedefNameDecl * 12209 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 12210 return ABI->getTypedefNameForUnnamedTagDecl(TD); 12211 } 12212 12213 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 12214 DeclaratorDecl *DD) { 12215 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 12216 } 12217 12218 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 12219 return ABI->getDeclaratorForUnnamedTagDecl(TD); 12220 } 12221 12222 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 12223 ParamIndices[D] = index; 12224 } 12225 12226 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 12227 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 12228 assert(I != ParamIndices.end() && 12229 "ParmIndices lacks entry set by ParmVarDecl"); 12230 return I->second; 12231 } 12232 12233 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 12234 unsigned Length) const { 12235 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 12236 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 12237 EltTy = EltTy.withConst(); 12238 12239 EltTy = adjustStringLiteralBaseType(EltTy); 12240 12241 // Get an array type for the string, according to C99 6.4.5. This includes 12242 // the null terminator character. 
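  // (Illustrative: in C++, the literal "foo" gets type 'const char[4]', i.e.
  // three characters plus the terminating null.)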
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
}

StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        *this, Key, StringLiteralKind::Ordinary,
        /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
        SourceLocation());
  return Result;
}

MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
  MSGuidDecls.InsertNode(New, InsertPos);
  return New;
}

UnnamedGlobalConstantDecl *
ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
                                         const APValue &APVal) const {
  llvm::FoldingSetNodeID ID;
  UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);

  void *InsertPos;
  if (UnnamedGlobalConstantDecl *Existing =
          UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  UnnamedGlobalConstantDecl *New =
      UnnamedGlobalConstantDecl::Create(*this, Ty, APVal);
  UnnamedGlobalConstantDecls.InsertNode(New, InsertPos);
  return New;
}

TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
  TemplateParamObjectDecls.InsertNode(New, InsertPos);
  return New;
}

bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}

bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
12335 if (MethodDecl->hasAttr<UnavailableAttr>() 12336 || MethodDecl->hasAttr<DeprecatedAttr>()) 12337 return false; 12338 if (MethodDecl->getObjCDeclQualifier() != 12339 MethodImpl->getObjCDeclQualifier()) 12340 return false; 12341 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12342 return false; 12343 12344 if (MethodDecl->param_size() != MethodImpl->param_size()) 12345 return false; 12346 12347 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12348 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12349 EF = MethodDecl->param_end(); 12350 IM != EM && IF != EF; ++IM, ++IF) { 12351 const ParmVarDecl *DeclVar = (*IF); 12352 const ParmVarDecl *ImplVar = (*IM); 12353 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12354 return false; 12355 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12356 return false; 12357 } 12358 12359 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12360 } 12361 12362 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12363 LangAS AS; 12364 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12365 AS = LangAS::Default; 12366 else 12367 AS = QT->getPointeeType().getAddressSpace(); 12368 12369 return getTargetInfo().getNullPointerValue(AS); 12370 } 12371 12372 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12373 return getTargetInfo().getTargetAddressSpace(AS); 12374 } 12375 12376 bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { 12377 if (X == Y) 12378 return true; 12379 if (!X || !Y) 12380 return false; 12381 llvm::FoldingSetNodeID IDX, IDY; 12382 X->Profile(IDX, *this, /*Canonical=*/true); 12383 Y->Profile(IDY, *this, /*Canonical=*/true); 12384 return IDX == IDY; 12385 } 12386 12387 // The getCommon* helpers return, for given 'same' X and Y entities given as 12388 // inputs, another entity which is also the 'same' as the inputs, but which 12389 // is closer to the canonical form of the inputs, each according to a given 12390 // criteria. 12391 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of 12392 // the regular ones. 12393 12394 static Decl *getCommonDecl(Decl *X, Decl *Y) { 12395 if (!declaresSameEntity(X, Y)) 12396 return nullptr; 12397 for (const Decl *DX : X->redecls()) { 12398 // If we reach Y before reaching the first decl, that means X is older. 12399 if (DX == Y) 12400 return X; 12401 // If we reach the first decl, then Y is older. 12402 if (DX->isFirstDecl()) 12403 return Y; 12404 } 12405 llvm_unreachable("Corrupt redecls chain"); 12406 } 12407 12408 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12409 static T *getCommonDecl(T *X, T *Y) { 12410 return cast_or_null<T>( 12411 getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), 12412 const_cast<Decl *>(cast_or_null<Decl>(Y)))); 12413 } 12414 12415 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12416 static T *getCommonDeclChecked(T *X, T *Y) { 12417 return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), 12418 const_cast<Decl *>(cast<Decl>(Y)))); 12419 } 12420 12421 static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, 12422 TemplateName Y) { 12423 if (X.getAsVoidPointer() == Y.getAsVoidPointer()) 12424 return X; 12425 // FIXME: There are cases here where we could find a common template name 12426 // with more sugar. For example one could be a SubstTemplateTemplate* 12427 // replacing the other. 
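  // Note (illustrative): the pointers can differ when the same template is
  // merely spelled differently, e.g. with and without a nested-name qualifier;
  // in that case fall back to comparing the canonical template names below.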
12428 TemplateName CX = Ctx.getCanonicalTemplateName(X); 12429 if (CX.getAsVoidPointer() != 12430 Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) 12431 return TemplateName(); 12432 return CX; 12433 } 12434 12435 static TemplateName 12436 getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { 12437 TemplateName R = getCommonTemplateName(Ctx, X, Y); 12438 assert(R.getAsVoidPointer() != nullptr); 12439 return R; 12440 } 12441 12442 static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, 12443 ArrayRef<QualType> Ys, bool Unqualified = false) { 12444 assert(Xs.size() == Ys.size()); 12445 SmallVector<QualType, 8> Rs(Xs.size()); 12446 for (size_t I = 0; I < Rs.size(); ++I) 12447 Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); 12448 return Rs; 12449 } 12450 12451 template <class T> 12452 static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { 12453 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() 12454 : SourceLocation(); 12455 } 12456 12457 static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, 12458 const TemplateArgument &X, 12459 const TemplateArgument &Y) { 12460 if (X.getKind() != Y.getKind()) 12461 return TemplateArgument(); 12462 12463 switch (X.getKind()) { 12464 case TemplateArgument::ArgKind::Type: 12465 if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) 12466 return TemplateArgument(); 12467 return TemplateArgument( 12468 Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); 12469 case TemplateArgument::ArgKind::NullPtr: 12470 if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) 12471 return TemplateArgument(); 12472 return TemplateArgument( 12473 Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), 12474 /*Unqualified=*/true); 12475 case TemplateArgument::ArgKind::Expression: 12476 if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) 12477 return TemplateArgument(); 12478 // FIXME: Try to keep the common sugar. 12479 return X; 12480 case TemplateArgument::ArgKind::Template: { 12481 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); 12482 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12483 if (!CTN.getAsVoidPointer()) 12484 return TemplateArgument(); 12485 return TemplateArgument(CTN); 12486 } 12487 case TemplateArgument::ArgKind::TemplateExpansion: { 12488 TemplateName TX = X.getAsTemplateOrTemplatePattern(), 12489 TY = Y.getAsTemplateOrTemplatePattern(); 12490 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12491 if (!CTN.getAsVoidPointer()) 12492 return TemplateName(); 12493 auto NExpX = X.getNumTemplateExpansions(); 12494 assert(NExpX == Y.getNumTemplateExpansions()); 12495 return TemplateArgument(CTN, NExpX); 12496 } 12497 default: 12498 // FIXME: Handle the other argument kinds. 
12499 return X; 12500 } 12501 } 12502 12503 static bool getCommonTemplateArguments(ASTContext &Ctx, 12504 SmallVectorImpl<TemplateArgument> &R, 12505 ArrayRef<TemplateArgument> Xs, 12506 ArrayRef<TemplateArgument> Ys) { 12507 if (Xs.size() != Ys.size()) 12508 return true; 12509 R.resize(Xs.size()); 12510 for (size_t I = 0; I < R.size(); ++I) { 12511 R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); 12512 if (R[I].isNull()) 12513 return true; 12514 } 12515 return false; 12516 } 12517 12518 static auto getCommonTemplateArguments(ASTContext &Ctx, 12519 ArrayRef<TemplateArgument> Xs, 12520 ArrayRef<TemplateArgument> Ys) { 12521 SmallVector<TemplateArgument, 8> R; 12522 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); 12523 assert(!Different); 12524 (void)Different; 12525 return R; 12526 } 12527 12528 template <class T> 12529 static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { 12530 return X->getKeyword() == Y->getKeyword() ? X->getKeyword() 12531 : ElaboratedTypeKeyword::None; 12532 } 12533 12534 template <class T> 12535 static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, 12536 const T *Y) { 12537 // FIXME: Try to keep the common NNS sugar. 12538 return X->getQualifier() == Y->getQualifier() 12539 ? X->getQualifier() 12540 : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); 12541 } 12542 12543 template <class T> 12544 static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { 12545 return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); 12546 } 12547 12548 template <class T> 12549 static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, 12550 Qualifiers &QX, const T *Y, 12551 Qualifiers &QY) { 12552 QualType EX = X->getElementType(), EY = Y->getElementType(); 12553 QualType R = Ctx.getCommonSugaredType(EX, EY, 12554 /*Unqualified=*/true); 12555 Qualifiers RQ = R.getQualifiers(); 12556 QX += EX.getQualifiers() - RQ; 12557 QY += EY.getQualifiers() - RQ; 12558 return R; 12559 } 12560 12561 template <class T> 12562 static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { 12563 return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); 12564 } 12565 12566 template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { 12567 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); 12568 return X->getSizeExpr(); 12569 } 12570 12571 static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { 12572 assert(X->getSizeModifier() == Y->getSizeModifier()); 12573 return X->getSizeModifier(); 12574 } 12575 12576 static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, 12577 const ArrayType *Y) { 12578 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); 12579 return X->getIndexTypeCVRQualifiers(); 12580 } 12581 12582 // Merges two type lists such that the resulting vector will contain 12583 // each type (in a canonical sense) only once, in the order they appear 12584 // from X to Y. If they occur in both X and Y, the result will contain 12585 // the common sugared type between them. 
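// For example (illustrative): merging {SomeTypedef, char *} with {int, float},
// where SomeTypedef is sugar for 'int', yields
// {getCommonSugaredType(SomeTypedef, int), char *, float}.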
12586 static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, 12587 ArrayRef<QualType> X, ArrayRef<QualType> Y) { 12588 llvm::DenseMap<QualType, unsigned> Found; 12589 for (auto Ts : {X, Y}) { 12590 for (QualType T : Ts) { 12591 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); 12592 if (!Res.second) { 12593 QualType &U = Out[Res.first->second]; 12594 U = Ctx.getCommonSugaredType(U, T); 12595 } else { 12596 Out.emplace_back(T); 12597 } 12598 } 12599 } 12600 } 12601 12602 FunctionProtoType::ExceptionSpecInfo 12603 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, 12604 FunctionProtoType::ExceptionSpecInfo ESI2, 12605 SmallVectorImpl<QualType> &ExceptionTypeStorage, 12606 bool AcceptDependent) { 12607 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; 12608 12609 // If either of them can throw anything, that is the result. 12610 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { 12611 if (EST1 == I) 12612 return ESI1; 12613 if (EST2 == I) 12614 return ESI2; 12615 } 12616 12617 // If either of them is non-throwing, the result is the other. 12618 for (auto I : 12619 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { 12620 if (EST1 == I) 12621 return ESI2; 12622 if (EST2 == I) 12623 return ESI1; 12624 } 12625 12626 // If we're left with value-dependent computed noexcept expressions, we're 12627 // stuck. Before C++17, we can just drop the exception specification entirely, 12628 // since it's not actually part of the canonical type. And this should never 12629 // happen in C++17, because it would mean we were computing the composite 12630 // pointer type of dependent types, which should never happen. 12631 if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) { 12632 assert(AcceptDependent && 12633 "computing composite pointer type of dependent types"); 12634 return FunctionProtoType::ExceptionSpecInfo(); 12635 } 12636 12637 // Switch over the possibilities so that people adding new values know to 12638 // update this function. 12639 switch (EST1) { 12640 case EST_None: 12641 case EST_DynamicNone: 12642 case EST_MSAny: 12643 case EST_BasicNoexcept: 12644 case EST_DependentNoexcept: 12645 case EST_NoexceptFalse: 12646 case EST_NoexceptTrue: 12647 case EST_NoThrow: 12648 llvm_unreachable("These ESTs should be handled above"); 12649 12650 case EST_Dynamic: { 12651 // This is the fun case: both exception specifications are dynamic. Form 12652 // the union of the two lists. 
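    // For example (illustrative): merging 'throw(A, B)' with 'throw(B, C)'
    // produces 'throw(A, B, C)'.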
12653 assert(EST2 == EST_Dynamic && "other cases should already be handled"); 12654 mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions, 12655 ESI2.Exceptions); 12656 FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic); 12657 Result.Exceptions = ExceptionTypeStorage; 12658 return Result; 12659 } 12660 12661 case EST_Unevaluated: 12662 case EST_Uninstantiated: 12663 case EST_Unparsed: 12664 llvm_unreachable("shouldn't see unresolved exception specifications here"); 12665 } 12666 12667 llvm_unreachable("invalid ExceptionSpecificationType"); 12668 } 12669 12670 static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, 12671 Qualifiers &QX, const Type *Y, 12672 Qualifiers &QY) { 12673 Type::TypeClass TC = X->getTypeClass(); 12674 assert(TC == Y->getTypeClass()); 12675 switch (TC) { 12676 #define UNEXPECTED_TYPE(Class, Kind) \ 12677 case Type::Class: \ 12678 llvm_unreachable("Unexpected " Kind ": " #Class); 12679 12680 #define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical") 12681 #define TYPE(Class, Base) 12682 #include "clang/AST/TypeNodes.inc" 12683 12684 #define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free") 12685 SUGAR_FREE_TYPE(Builtin) 12686 SUGAR_FREE_TYPE(DeducedTemplateSpecialization) 12687 SUGAR_FREE_TYPE(DependentBitInt) 12688 SUGAR_FREE_TYPE(Enum) 12689 SUGAR_FREE_TYPE(BitInt) 12690 SUGAR_FREE_TYPE(ObjCInterface) 12691 SUGAR_FREE_TYPE(Record) 12692 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack) 12693 SUGAR_FREE_TYPE(UnresolvedUsing) 12694 #undef SUGAR_FREE_TYPE 12695 #define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique") 12696 NON_UNIQUE_TYPE(TypeOfExpr) 12697 NON_UNIQUE_TYPE(VariableArray) 12698 #undef NON_UNIQUE_TYPE 12699 12700 UNEXPECTED_TYPE(TypeOf, "sugar") 12701 12702 #undef UNEXPECTED_TYPE 12703 12704 case Type::Auto: { 12705 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 12706 assert(AX->getDeducedType().isNull()); 12707 assert(AY->getDeducedType().isNull()); 12708 assert(AX->getKeyword() == AY->getKeyword()); 12709 assert(AX->isInstantiationDependentType() == 12710 AY->isInstantiationDependentType()); 12711 auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(), 12712 AY->getTypeConstraintArguments()); 12713 return Ctx.getAutoType(QualType(), AX->getKeyword(), 12714 AX->isInstantiationDependentType(), 12715 AX->containsUnexpandedParameterPack(), 12716 getCommonDeclChecked(AX->getTypeConstraintConcept(), 12717 AY->getTypeConstraintConcept()), 12718 As); 12719 } 12720 case Type::IncompleteArray: { 12721 const auto *AX = cast<IncompleteArrayType>(X), 12722 *AY = cast<IncompleteArrayType>(Y); 12723 return Ctx.getIncompleteArrayType( 12724 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12725 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12726 } 12727 case Type::DependentSizedArray: { 12728 const auto *AX = cast<DependentSizedArrayType>(X), 12729 *AY = cast<DependentSizedArrayType>(Y); 12730 return Ctx.getDependentSizedArrayType( 12731 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12732 getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY), 12733 getCommonIndexTypeCVRQualifiers(AX, AY), 12734 AX->getBracketsRange() == AY->getBracketsRange() 12735 ? 
AX->getBracketsRange() 12736 : SourceRange()); 12737 } 12738 case Type::ConstantArray: { 12739 const auto *AX = cast<ConstantArrayType>(X), 12740 *AY = cast<ConstantArrayType>(Y); 12741 assert(AX->getSize() == AY->getSize()); 12742 const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) 12743 ? AX->getSizeExpr() 12744 : nullptr; 12745 return Ctx.getConstantArrayType( 12746 getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, 12747 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12748 } 12749 case Type::Atomic: { 12750 const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); 12751 return Ctx.getAtomicType( 12752 Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType())); 12753 } 12754 case Type::Complex: { 12755 const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y); 12756 return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY)); 12757 } 12758 case Type::Pointer: { 12759 const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y); 12760 return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY)); 12761 } 12762 case Type::BlockPointer: { 12763 const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y); 12764 return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY)); 12765 } 12766 case Type::ObjCObjectPointer: { 12767 const auto *PX = cast<ObjCObjectPointerType>(X), 12768 *PY = cast<ObjCObjectPointerType>(Y); 12769 return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY)); 12770 } 12771 case Type::MemberPointer: { 12772 const auto *PX = cast<MemberPointerType>(X), 12773 *PY = cast<MemberPointerType>(Y); 12774 return Ctx.getMemberPointerType( 12775 getCommonPointeeType(Ctx, PX, PY), 12776 Ctx.getCommonSugaredType(QualType(PX->getClass(), 0), 12777 QualType(PY->getClass(), 0)) 12778 .getTypePtr()); 12779 } 12780 case Type::LValueReference: { 12781 const auto *PX = cast<LValueReferenceType>(X), 12782 *PY = cast<LValueReferenceType>(Y); 12783 // FIXME: Preserve PointeeTypeAsWritten. 12784 return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY), 12785 PX->isSpelledAsLValue() || 12786 PY->isSpelledAsLValue()); 12787 } 12788 case Type::RValueReference: { 12789 const auto *PX = cast<RValueReferenceType>(X), 12790 *PY = cast<RValueReferenceType>(Y); 12791 // FIXME: Preserve PointeeTypeAsWritten. 
    return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(X),
               *PY = cast<DependentAddressSpaceType>(Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY),
                                            PX->getAddrSpaceExpr(),
                                            getCommonAttrLoc(PX, PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(X),
               *FY = cast<FunctionNoProtoType>(Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()),
        FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(X),
               *FY = cast<FunctionProtoType>(Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use an empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType());
    auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, true);
    return Ctx.getFunctionType(R, P, EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
                              OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs,
        OX->getProtocols(),
        OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(X),
               *MY = cast<ConstantMatrixType>(Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY),
                                     MX->getNumRows(), MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(X),
               *MY = cast<DependentSizedMatrixType>(Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        getCommonElementType(Ctx, MX, MY), MX->getRowExpr(),
        MX->getColumnExpr(), getCommonAttrLoc(MX, MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(X), *VY =
cast<VectorType>(Y); 12871 assert(VX->getNumElements() == VY->getNumElements()); 12872 assert(VX->getVectorKind() == VY->getVectorKind()); 12873 return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY), 12874 VX->getNumElements(), VX->getVectorKind()); 12875 } 12876 case Type::ExtVector: { 12877 const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y); 12878 assert(VX->getNumElements() == VY->getNumElements()); 12879 return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY), 12880 VX->getNumElements()); 12881 } 12882 case Type::DependentSizedExtVector: { 12883 const auto *VX = cast<DependentSizedExtVectorType>(X), 12884 *VY = cast<DependentSizedExtVectorType>(Y); 12885 return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY), 12886 getCommonSizeExpr(Ctx, VX, VY), 12887 getCommonAttrLoc(VX, VY)); 12888 } 12889 case Type::DependentVector: { 12890 const auto *VX = cast<DependentVectorType>(X), 12891 *VY = cast<DependentVectorType>(Y); 12892 assert(VX->getVectorKind() == VY->getVectorKind()); 12893 return Ctx.getDependentVectorType( 12894 getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY), 12895 getCommonAttrLoc(VX, VY), VX->getVectorKind()); 12896 } 12897 case Type::InjectedClassName: { 12898 const auto *IX = cast<InjectedClassNameType>(X), 12899 *IY = cast<InjectedClassNameType>(Y); 12900 return Ctx.getInjectedClassNameType( 12901 getCommonDeclChecked(IX->getDecl(), IY->getDecl()), 12902 Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(), 12903 IY->getInjectedSpecializationType())); 12904 } 12905 case Type::TemplateSpecialization: { 12906 const auto *TX = cast<TemplateSpecializationType>(X), 12907 *TY = cast<TemplateSpecializationType>(Y); 12908 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12909 TY->template_arguments()); 12910 return Ctx.getTemplateSpecializationType( 12911 ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(), 12912 TY->getTemplateName()), 12913 As, X->getCanonicalTypeInternal()); 12914 } 12915 case Type::Decltype: { 12916 const auto *DX = cast<DecltypeType>(X); 12917 [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y); 12918 assert(DX->isDependentType()); 12919 assert(DY->isDependentType()); 12920 assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr())); 12921 // As Decltype is not uniqued, building a common type would be wasteful. 
12922 return QualType(DX, 0); 12923 } 12924 case Type::DependentName: { 12925 const auto *NX = cast<DependentNameType>(X), 12926 *NY = cast<DependentNameType>(Y); 12927 assert(NX->getIdentifier() == NY->getIdentifier()); 12928 return Ctx.getDependentNameType( 12929 getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY), 12930 NX->getIdentifier(), NX->getCanonicalTypeInternal()); 12931 } 12932 case Type::DependentTemplateSpecialization: { 12933 const auto *TX = cast<DependentTemplateSpecializationType>(X), 12934 *TY = cast<DependentTemplateSpecializationType>(Y); 12935 assert(TX->getIdentifier() == TY->getIdentifier()); 12936 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12937 TY->template_arguments()); 12938 return Ctx.getDependentTemplateSpecializationType( 12939 getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY), 12940 TX->getIdentifier(), As); 12941 } 12942 case Type::UnaryTransform: { 12943 const auto *TX = cast<UnaryTransformType>(X), 12944 *TY = cast<UnaryTransformType>(Y); 12945 assert(TX->getUTTKind() == TY->getUTTKind()); 12946 return Ctx.getUnaryTransformType( 12947 Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()), 12948 Ctx.getCommonSugaredType(TX->getUnderlyingType(), 12949 TY->getUnderlyingType()), 12950 TX->getUTTKind()); 12951 } 12952 case Type::PackExpansion: { 12953 const auto *PX = cast<PackExpansionType>(X), 12954 *PY = cast<PackExpansionType>(Y); 12955 assert(PX->getNumExpansions() == PY->getNumExpansions()); 12956 return Ctx.getPackExpansionType( 12957 Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()), 12958 PX->getNumExpansions(), false); 12959 } 12960 case Type::Pipe: { 12961 const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y); 12962 assert(PX->isReadOnly() == PY->isReadOnly()); 12963 auto MP = PX->isReadOnly() ? 
&ASTContext::getReadPipeType 12964 : &ASTContext::getWritePipeType; 12965 return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY)); 12966 } 12967 case Type::TemplateTypeParm: { 12968 const auto *TX = cast<TemplateTypeParmType>(X), 12969 *TY = cast<TemplateTypeParmType>(Y); 12970 assert(TX->getDepth() == TY->getDepth()); 12971 assert(TX->getIndex() == TY->getIndex()); 12972 assert(TX->isParameterPack() == TY->isParameterPack()); 12973 return Ctx.getTemplateTypeParmType( 12974 TX->getDepth(), TX->getIndex(), TX->isParameterPack(), 12975 getCommonDecl(TX->getDecl(), TY->getDecl())); 12976 } 12977 } 12978 llvm_unreachable("Unknown Type Class"); 12979 } 12980 12981 static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, 12982 const Type *Y, 12983 SplitQualType Underlying) { 12984 Type::TypeClass TC = X->getTypeClass(); 12985 if (TC != Y->getTypeClass()) 12986 return QualType(); 12987 switch (TC) { 12988 #define UNEXPECTED_TYPE(Class, Kind) \ 12989 case Type::Class: \ 12990 llvm_unreachable("Unexpected " Kind ": " #Class); 12991 #define TYPE(Class, Base) 12992 #define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent") 12993 #include "clang/AST/TypeNodes.inc" 12994 12995 #define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical") 12996 CANONICAL_TYPE(Atomic) 12997 CANONICAL_TYPE(BitInt) 12998 CANONICAL_TYPE(BlockPointer) 12999 CANONICAL_TYPE(Builtin) 13000 CANONICAL_TYPE(Complex) 13001 CANONICAL_TYPE(ConstantArray) 13002 CANONICAL_TYPE(ConstantMatrix) 13003 CANONICAL_TYPE(Enum) 13004 CANONICAL_TYPE(ExtVector) 13005 CANONICAL_TYPE(FunctionNoProto) 13006 CANONICAL_TYPE(FunctionProto) 13007 CANONICAL_TYPE(IncompleteArray) 13008 CANONICAL_TYPE(LValueReference) 13009 CANONICAL_TYPE(MemberPointer) 13010 CANONICAL_TYPE(ObjCInterface) 13011 CANONICAL_TYPE(ObjCObject) 13012 CANONICAL_TYPE(ObjCObjectPointer) 13013 CANONICAL_TYPE(Pipe) 13014 CANONICAL_TYPE(Pointer) 13015 CANONICAL_TYPE(Record) 13016 CANONICAL_TYPE(RValueReference) 13017 CANONICAL_TYPE(VariableArray) 13018 CANONICAL_TYPE(Vector) 13019 #undef CANONICAL_TYPE 13020 13021 #undef UNEXPECTED_TYPE 13022 13023 case Type::Adjusted: { 13024 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); 13025 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); 13026 if (!Ctx.hasSameType(OX, OY)) 13027 return QualType(); 13028 // FIXME: It's inefficient to have to unify the original types. 13029 return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), 13030 Ctx.getQualifiedType(Underlying)); 13031 } 13032 case Type::Decayed: { 13033 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); 13034 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); 13035 if (!Ctx.hasSameType(OX, OY)) 13036 return QualType(); 13037 // FIXME: It's inefficient to have to unify the original types. 13038 return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY), 13039 Ctx.getQualifiedType(Underlying)); 13040 } 13041 case Type::Attributed: { 13042 const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y); 13043 AttributedType::Kind Kind = AX->getAttrKind(); 13044 if (Kind != AY->getAttrKind()) 13045 return QualType(); 13046 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType(); 13047 if (!Ctx.hasSameType(MX, MY)) 13048 return QualType(); 13049 // FIXME: It's inefficient to have to unify the modified types. 
13050 return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY), 13051 Ctx.getQualifiedType(Underlying)); 13052 } 13053 case Type::BTFTagAttributed: { 13054 const auto *BX = cast<BTFTagAttributedType>(X); 13055 const BTFTypeTagAttr *AX = BX->getAttr(); 13056 // The attribute is not uniqued, so just compare the tag. 13057 if (AX->getBTFTypeTag() != 13058 cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag()) 13059 return QualType(); 13060 return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying)); 13061 } 13062 case Type::Auto: { 13063 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 13064 13065 AutoTypeKeyword KW = AX->getKeyword(); 13066 if (KW != AY->getKeyword()) 13067 return QualType(); 13068 13069 ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(), 13070 AY->getTypeConstraintConcept()); 13071 SmallVector<TemplateArgument, 8> As; 13072 if (CD && 13073 getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(), 13074 AY->getTypeConstraintArguments())) { 13075 CD = nullptr; // The arguments differ, so make it unconstrained. 13076 As.clear(); 13077 } 13078 13079 // Both auto types can't be dependent, otherwise they wouldn't have been 13080 // sugar. This implies they can't contain unexpanded packs either. 13081 return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), 13082 /*IsDependent=*/false, /*IsPack=*/false, CD, As); 13083 } 13084 case Type::Decltype: 13085 return QualType(); 13086 case Type::DeducedTemplateSpecialization: 13087 // FIXME: Try to merge these. 13088 return QualType(); 13089 13090 case Type::Elaborated: { 13091 const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y); 13092 return Ctx.getElaboratedType( 13093 ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY), 13094 Ctx.getQualifiedType(Underlying), 13095 ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl())); 13096 } 13097 case Type::MacroQualified: { 13098 const auto *MX = cast<MacroQualifiedType>(X), 13099 *MY = cast<MacroQualifiedType>(Y); 13100 const IdentifierInfo *IX = MX->getMacroIdentifier(); 13101 if (IX != MY->getMacroIdentifier()) 13102 return QualType(); 13103 return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX); 13104 } 13105 case Type::SubstTemplateTypeParm: { 13106 const auto *SX = cast<SubstTemplateTypeParmType>(X), 13107 *SY = cast<SubstTemplateTypeParmType>(Y); 13108 Decl *CD = 13109 ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl()); 13110 if (!CD) 13111 return QualType(); 13112 unsigned Index = SX->getIndex(); 13113 if (Index != SY->getIndex()) 13114 return QualType(); 13115 auto PackIndex = SX->getPackIndex(); 13116 if (PackIndex != SY->getPackIndex()) 13117 return QualType(); 13118 return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying), 13119 CD, Index, PackIndex); 13120 } 13121 case Type::ObjCTypeParam: 13122 // FIXME: Try to merge these. 
13123 return QualType(); 13124 case Type::Paren: 13125 return Ctx.getParenType(Ctx.getQualifiedType(Underlying)); 13126 13127 case Type::TemplateSpecialization: { 13128 const auto *TX = cast<TemplateSpecializationType>(X), 13129 *TY = cast<TemplateSpecializationType>(Y); 13130 TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(), 13131 TY->getTemplateName()); 13132 if (!CTN.getAsVoidPointer()) 13133 return QualType(); 13134 SmallVector<TemplateArgument, 8> Args; 13135 if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(), 13136 TY->template_arguments())) 13137 return QualType(); 13138 return Ctx.getTemplateSpecializationType(CTN, Args, 13139 Ctx.getQualifiedType(Underlying)); 13140 } 13141 case Type::Typedef: { 13142 const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y); 13143 const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl()); 13144 if (!CD) 13145 return QualType(); 13146 return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying)); 13147 } 13148 case Type::TypeOf: { 13149 // The common sugar between two typeof expressions, where one is 13150 // potentially a typeof_unqual and the other is not, we unify to the 13151 // qualified type as that retains the most information along with the type. 13152 // We only return a typeof_unqual type when both types are unqual types. 13153 TypeOfKind Kind = TypeOfKind::Qualified; 13154 if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() && 13155 cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified) 13156 Kind = TypeOfKind::Unqualified; 13157 return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind); 13158 } 13159 case Type::TypeOfExpr: 13160 return QualType(); 13161 13162 case Type::UnaryTransform: { 13163 const auto *UX = cast<UnaryTransformType>(X), 13164 *UY = cast<UnaryTransformType>(Y); 13165 UnaryTransformType::UTTKind KX = UX->getUTTKind(); 13166 if (KX != UY->getUTTKind()) 13167 return QualType(); 13168 QualType BX = UX->getBaseType(), BY = UY->getBaseType(); 13169 if (!Ctx.hasSameType(BX, BY)) 13170 return QualType(); 13171 // FIXME: It's inefficient to have to unify the base types. 13172 return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY), 13173 Ctx.getQualifiedType(Underlying), KX); 13174 } 13175 case Type::Using: { 13176 const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y); 13177 const UsingShadowDecl *CD = 13178 ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl()); 13179 if (!CD) 13180 return QualType(); 13181 return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying)); 13182 } 13183 } 13184 llvm_unreachable("Unhandled Type Class"); 13185 } 13186 13187 static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) { 13188 SmallVector<SplitQualType, 8> R; 13189 while (true) { 13190 QTotal.addConsistentQualifiers(T.Quals); 13191 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType(); 13192 if (NT == QualType(T.Ty, 0)) 13193 break; 13194 R.push_back(T); 13195 T = NT.split(); 13196 } 13197 return R; 13198 } 13199 13200 QualType ASTContext::getCommonSugaredType(QualType X, QualType Y, 13201 bool Unqualified) { 13202 assert(Unqualified ? 
hasSameUnqualifiedType(X, Y) : hasSameType(X, Y)); 13203 if (X == Y) 13204 return X; 13205 if (!Unqualified) { 13206 if (X.isCanonical()) 13207 return X; 13208 if (Y.isCanonical()) 13209 return Y; 13210 } 13211 13212 SplitQualType SX = X.split(), SY = Y.split(); 13213 Qualifiers QX, QY; 13214 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys, 13215 // until we reach their underlying "canonical nodes". Note these are not 13216 // necessarily canonical types, as they may still have sugared properties. 13217 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively. 13218 auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY); 13219 if (SX.Ty != SY.Ty) { 13220 // The canonical nodes differ. Build a common canonical node out of the two, 13221 // unifying their sugar. This may recurse back here. 13222 SX.Ty = 13223 ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr(); 13224 } else { 13225 // The canonical nodes were identical: We may have desugared too much. 13226 // Add any common sugar back in. 13227 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) { 13228 QX -= SX.Quals; 13229 QY -= SY.Quals; 13230 SX = Xs.pop_back_val(); 13231 SY = Ys.pop_back_val(); 13232 } 13233 } 13234 if (Unqualified) 13235 QX = Qualifiers::removeCommonQualifiers(QX, QY); 13236 else 13237 assert(QX == QY); 13238 13239 // Even though the remaining sugar nodes in Xs and Ys differ, some may be 13240 // related. Walk up these nodes, unifying them and adding the result. 13241 while (!Xs.empty() && !Ys.empty()) { 13242 auto Underlying = SplitQualType( 13243 SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals)); 13244 SX = Xs.pop_back_val(); 13245 SY = Ys.pop_back_val(); 13246 SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying) 13247 .getTypePtrOrNull(); 13248 // Stop at the first pair which is unrelated. 13249 if (!SX.Ty) { 13250 SX.Ty = Underlying.Ty; 13251 break; 13252 } 13253 QX -= Underlying.Quals; 13254 }; 13255 13256 // Add back the missing accumulated qualifiers, which were stripped off 13257 // with the sugar nodes we could not unify. 13258 QualType R = getQualifiedType(SX.Ty, QX); 13259 assert(Unqualified ? 
hasSameUnqualifiedType(R, X) : hasSameType(R, X)); 13260 return R; 13261 } 13262 13263 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 13264 assert(Ty->isFixedPointType()); 13265 13266 if (Ty->isSaturatedFixedPointType()) return Ty; 13267 13268 switch (Ty->castAs<BuiltinType>()->getKind()) { 13269 default: 13270 llvm_unreachable("Not a fixed point type!"); 13271 case BuiltinType::ShortAccum: 13272 return SatShortAccumTy; 13273 case BuiltinType::Accum: 13274 return SatAccumTy; 13275 case BuiltinType::LongAccum: 13276 return SatLongAccumTy; 13277 case BuiltinType::UShortAccum: 13278 return SatUnsignedShortAccumTy; 13279 case BuiltinType::UAccum: 13280 return SatUnsignedAccumTy; 13281 case BuiltinType::ULongAccum: 13282 return SatUnsignedLongAccumTy; 13283 case BuiltinType::ShortFract: 13284 return SatShortFractTy; 13285 case BuiltinType::Fract: 13286 return SatFractTy; 13287 case BuiltinType::LongFract: 13288 return SatLongFractTy; 13289 case BuiltinType::UShortFract: 13290 return SatUnsignedShortFractTy; 13291 case BuiltinType::UFract: 13292 return SatUnsignedFractTy; 13293 case BuiltinType::ULongFract: 13294 return SatUnsignedLongFractTy; 13295 } 13296 } 13297 13298 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 13299 if (LangOpts.OpenCL) 13300 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 13301 13302 if (LangOpts.CUDA) 13303 return getTargetInfo().getCUDABuiltinAddressSpace(AS); 13304 13305 return getLangASFromTargetAS(AS); 13306 } 13307 13308 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 13309 // doesn't include ASTContext.h 13310 template 13311 clang::LazyGenerationalUpdatePtr< 13312 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 13313 clang::LazyGenerationalUpdatePtr< 13314 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 13315 const clang::ASTContext &Ctx, Decl *Value); 13316 13317 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 13318 assert(Ty->isFixedPointType()); 13319 13320 const TargetInfo &Target = getTargetInfo(); 13321 switch (Ty->castAs<BuiltinType>()->getKind()) { 13322 default: 13323 llvm_unreachable("Not a fixed point type!"); 13324 case BuiltinType::ShortAccum: 13325 case BuiltinType::SatShortAccum: 13326 return Target.getShortAccumScale(); 13327 case BuiltinType::Accum: 13328 case BuiltinType::SatAccum: 13329 return Target.getAccumScale(); 13330 case BuiltinType::LongAccum: 13331 case BuiltinType::SatLongAccum: 13332 return Target.getLongAccumScale(); 13333 case BuiltinType::UShortAccum: 13334 case BuiltinType::SatUShortAccum: 13335 return Target.getUnsignedShortAccumScale(); 13336 case BuiltinType::UAccum: 13337 case BuiltinType::SatUAccum: 13338 return Target.getUnsignedAccumScale(); 13339 case BuiltinType::ULongAccum: 13340 case BuiltinType::SatULongAccum: 13341 return Target.getUnsignedLongAccumScale(); 13342 case BuiltinType::ShortFract: 13343 case BuiltinType::SatShortFract: 13344 return Target.getShortFractScale(); 13345 case BuiltinType::Fract: 13346 case BuiltinType::SatFract: 13347 return Target.getFractScale(); 13348 case BuiltinType::LongFract: 13349 case BuiltinType::SatLongFract: 13350 return Target.getLongFractScale(); 13351 case BuiltinType::UShortFract: 13352 case BuiltinType::SatUShortFract: 13353 return Target.getUnsignedShortFractScale(); 13354 case BuiltinType::UFract: 13355 case BuiltinType::SatUFract: 13356 return Target.getUnsignedFractScale(); 13357 case BuiltinType::ULongFract: 13358 
case BuiltinType::SatULongFract: 13359 return Target.getUnsignedLongFractScale(); 13360 } 13361 } 13362 13363 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 13364 assert(Ty->isFixedPointType()); 13365 13366 const TargetInfo &Target = getTargetInfo(); 13367 switch (Ty->castAs<BuiltinType>()->getKind()) { 13368 default: 13369 llvm_unreachable("Not a fixed point type!"); 13370 case BuiltinType::ShortAccum: 13371 case BuiltinType::SatShortAccum: 13372 return Target.getShortAccumIBits(); 13373 case BuiltinType::Accum: 13374 case BuiltinType::SatAccum: 13375 return Target.getAccumIBits(); 13376 case BuiltinType::LongAccum: 13377 case BuiltinType::SatLongAccum: 13378 return Target.getLongAccumIBits(); 13379 case BuiltinType::UShortAccum: 13380 case BuiltinType::SatUShortAccum: 13381 return Target.getUnsignedShortAccumIBits(); 13382 case BuiltinType::UAccum: 13383 case BuiltinType::SatUAccum: 13384 return Target.getUnsignedAccumIBits(); 13385 case BuiltinType::ULongAccum: 13386 case BuiltinType::SatULongAccum: 13387 return Target.getUnsignedLongAccumIBits(); 13388 case BuiltinType::ShortFract: 13389 case BuiltinType::SatShortFract: 13390 case BuiltinType::Fract: 13391 case BuiltinType::SatFract: 13392 case BuiltinType::LongFract: 13393 case BuiltinType::SatLongFract: 13394 case BuiltinType::UShortFract: 13395 case BuiltinType::SatUShortFract: 13396 case BuiltinType::UFract: 13397 case BuiltinType::SatUFract: 13398 case BuiltinType::ULongFract: 13399 case BuiltinType::SatULongFract: 13400 return 0; 13401 } 13402 } 13403 13404 llvm::FixedPointSemantics 13405 ASTContext::getFixedPointSemantics(QualType Ty) const { 13406 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 13407 "Can only get the fixed point semantics for a " 13408 "fixed point or integer type."); 13409 if (Ty->isIntegerType()) 13410 return llvm::FixedPointSemantics::GetIntegerSemantics( 13411 getIntWidth(Ty), Ty->isSignedIntegerType()); 13412 13413 bool isSigned = Ty->isSignedFixedPointType(); 13414 return llvm::FixedPointSemantics( 13415 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 13416 Ty->isSaturatedFixedPointType(), 13417 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 13418 } 13419 13420 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 13421 assert(Ty->isFixedPointType()); 13422 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 13423 } 13424 13425 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 13426 assert(Ty->isFixedPointType()); 13427 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 13428 } 13429 13430 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 13431 assert(Ty->isUnsignedFixedPointType() && 13432 "Expected unsigned fixed point type"); 13433 13434 switch (Ty->castAs<BuiltinType>()->getKind()) { 13435 case BuiltinType::UShortAccum: 13436 return ShortAccumTy; 13437 case BuiltinType::UAccum: 13438 return AccumTy; 13439 case BuiltinType::ULongAccum: 13440 return LongAccumTy; 13441 case BuiltinType::SatUShortAccum: 13442 return SatShortAccumTy; 13443 case BuiltinType::SatUAccum: 13444 return SatAccumTy; 13445 case BuiltinType::SatULongAccum: 13446 return SatLongAccumTy; 13447 case BuiltinType::UShortFract: 13448 return ShortFractTy; 13449 case BuiltinType::UFract: 13450 return FractTy; 13451 case BuiltinType::ULongFract: 13452 return LongFractTy; 13453 case BuiltinType::SatUShortFract: 13454 return SatShortFractTy; 13455 case BuiltinType::SatUFract: 13456 
return SatFractTy; 13457 case BuiltinType::SatULongFract: 13458 return SatLongFractTy; 13459 default: 13460 llvm_unreachable("Unexpected unsigned fixed point type"); 13461 } 13462 } 13463 13464 std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs( 13465 const TargetVersionAttr *TV) const { 13466 assert(TV != nullptr); 13467 llvm::SmallVector<StringRef, 8> Feats; 13468 std::vector<std::string> ResFeats; 13469 TV->getFeatures(Feats); 13470 for (auto &Feature : Feats) 13471 if (Target->validateCpuSupports(Feature.str())) 13472 // Use '?' to mark features that came from TargetVersion. 13473 ResFeats.push_back("?" + Feature.str()); 13474 return ResFeats; 13475 } 13476 13477 ParsedTargetAttr 13478 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 13479 assert(TD != nullptr); 13480 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr()); 13481 13482 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 13483 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 13484 }); 13485 return ParsedAttr; 13486 } 13487 13488 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13489 const FunctionDecl *FD) const { 13490 if (FD) 13491 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 13492 else 13493 Target->initFeatureMap(FeatureMap, getDiagnostics(), 13494 Target->getTargetOpts().CPU, 13495 Target->getTargetOpts().Features); 13496 } 13497 13498 // Fills in the supplied string map with the set of target features for the 13499 // passed in function. 13500 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13501 GlobalDecl GD) const { 13502 StringRef TargetCPU = Target->getTargetOpts().CPU; 13503 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 13504 if (const auto *TD = FD->getAttr<TargetAttr>()) { 13505 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 13506 13507 // Make a copy of the features as passed on the command line into the 13508 // beginning of the additional features from the function to override. 13509 ParsedAttr.Features.insert( 13510 ParsedAttr.Features.begin(), 13511 Target->getTargetOpts().FeaturesAsWritten.begin(), 13512 Target->getTargetOpts().FeaturesAsWritten.end()); 13513 13514 if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) 13515 TargetCPU = ParsedAttr.CPU; 13516 13517 // Now populate the feature map, first with the TargetCPU which is either 13518 // the default or a new one from the target attribute string. Then we'll use 13519 // the passed in features (FeaturesAsWritten) along with the new ones from 13520 // the attribute. 
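    // For example (illustrative): with '-target-feature +sse4.2' on the
    // command line and __attribute__((target("avx"))) on the function, both
    // features end up enabled in the resulting feature map.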
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    std::vector<std::string> Features;
    StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
    if (Target->getTriple().isAArch64()) {
      // TargetClones for AArch64
      if (VersionStr != "default") {
        SmallVector<StringRef, 1> VersionFeatures;
        VersionStr.split(VersionFeatures, "+");
        for (auto &VFeature : VersionFeatures) {
          VFeature = VFeature.trim();
          // Use '?' to mark features that came from AArch64 TargetClones.
          Features.push_back((StringRef{"?"} + VFeature).str());
        }
      }
      Features.insert(Features.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.end());
    } else {
      if (VersionStr.starts_with("arch="))
        TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back((StringRef{"+"} + VersionStr).str());
    }
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
    Feats.insert(Feats.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalize(const Decl *D) const {
  bool IsInternalVar =
      isa<VarDecl>(D) &&
      basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: managed variables need to be externalized since they are
  // declarations in IR and therefore cannot have internal linkage. Kernels in
  // an anonymous namespace need to be externalized to avoid duplicate symbols.
13591 return (IsInternalVar && 13592 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 13593 (D->hasAttr<CUDAGlobalAttr>() && 13594 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 13595 GVA_Internal); 13596 } 13597 13598 bool ASTContext::shouldExternalize(const Decl *D) const { 13599 return mayExternalize(D) && 13600 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 13601 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 13602 } 13603 13604 StringRef ASTContext::getCUIDHash() const { 13605 if (!CUIDHash.empty()) 13606 return CUIDHash; 13607 if (LangOpts.CUID.empty()) 13608 return StringRef(); 13609 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 13610 return CUIDHash; 13611 } 13612