//===- ASTContext.cpp - Context to hold long-lived AST nodes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
static SourceLocation getDeclLocForCommentSearch(const Decl *D,
                                                 SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not its definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    return D->getBeginLoc();

  const SourceLocation DeclLoc = D->getLocation();
  if (DeclLoc.isMacroID()) {
    if (isa<TypedefDecl>(D)) {
      // If the location of the typedef name is in a macro, it is because it is
      // being declared via a macro. Try using the declaration's starting
      // location as the "declaration location".
      return D->getBeginLoc();
    }

    if (const auto *TD = dyn_cast<TagDecl>(D)) {
      // If the location of the tag decl is inside a macro, but the spelling of
      // the tag name comes from a macro argument, it looks like a special
      // macro like NS_ENUM is being used to define the tag decl.  In that
      // case, adjust the source location to the expansion loc so that we can
      // attach the comment to the tag decl.
      if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition())
        return SourceMgr.getExpansionLoc(DeclLoc);
    }
  }

  return DeclLoc;
}

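// For illustration (user code, not exercised here): given
//
//   /// Doc comment.
//   typedef struct X {} Y;   // search anchors at the start of the typedef
//   int a, b;                // search anchors at each identifier's location
//
// the function above returns D->getBeginLoc() for the typedef so the comment
// can be associated with Y across the braces, while ordinary declarators use
// the identifier location because several declarations may share one
// decl-specifier-seq.
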
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

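// For illustration, the two comment shapes this lookup recognizes:
//
//   int Width;  ///< trailing comment, beginning on the declaration's line
//
//   /// Preceding comment; only whitespace may separate it from the
//   /// declaration (no ';', '{', '}', '#', or '@' in between).
//   int Height;
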
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return nullptr;

  if (ExternalSource && !CommentsLoaded) {
    ExternalSource->ReadComments();
    CommentsLoaded = true;
  }

  if (Comments.empty())
    return nullptr;

  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
  if (!File.isValid()) {
    return nullptr;
  }
  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty())
    return nullptr;

  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

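// For illustration: given
//
//   /// Doc comment on the template.
//   template <typename T> void f(T);
//   void g() { f(42); }   // creates an implicit instantiation f<int>
//
// adjustDeclToTemplate maps both the templated FunctionDecl and the implicit
// instantiation f<int> back to the FunctionTemplateDecl, so a doc comment
// written on the template is found for either declaration.
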
const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalid.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
    if (LookupRes != CommentlessRedeclChains.end())
      return LookupRes->second;
    return nullptr;
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

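// Note on the caches used above: DeclRawComments maps an individual
// declaration to its raw comment, RedeclChainComments maps a canonical
// declaration to whichever redeclaration carries the comment, and
// CommentlessRedeclChains remembers how far along a redeclaration chain the
// search has already looked, so repeated queries do not rescan the same
// redeclarations.
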
static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.

  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (DeclRawComments.count(D) > 0)
      continue;

    if (RawComment *const DocComment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
      cacheRawCommentForDecl(*D, *DocComment);
      comments::FullComment *FC = DocComment->parse(*this, PP, D);
      ParsedComments[D->getCanonicalDecl()] = FC;
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

comments::FullComment *
ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl.  This is
  // important because comments can contain references to parameter names which
  // can be different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

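// For illustration of the fallback chain above: if a declaration has no
// comment of its own, documentation may be inherited, e.g.
//
//   struct Base { /** Frobnicates. */ virtual void frob(); };
//   struct Derived : Base { void frob() override; };   // no comment
//
// getCommentForDecl on Derived::frob returns a clone of Base::frob's comment;
// analogous fallbacks exist for ObjC property accessors, typedefs of tag
// types, ObjC super classes and categories, and public C++ base classes.
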
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      const TypeConstraint *TC = TTP->getTypeConstraint();
      ID.AddBoolean(TC != nullptr);
      if (TC)
        TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
                                                        /*Canonical=*/true);
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      const Expr *TC = NTTP->getPlaceholderTypeConstraint();
      ID.AddBoolean(TC != nullptr);
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (TC)
        TC->Profile(ID, C, /*Canonical=*/true);
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
  Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
  ID.AddBoolean(RequiresClause != nullptr);
  if (RequiresClause)
    RequiresClause->Profile(ID, C, /*Canonical=*/true);
}

static Expr *
canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
                                          QualType ConstrainedType) {
  // This is a bit ugly - we need to form a new immediately-declared
  // constraint that references the new parameter; this would ideally
  // require semantic analysis (e.g. template<C T> struct S {}; - the
  // converted arguments of C<T> could be an argument pack if C is
  // declared as template<typename... T> concept C = ...).
  // We don't have semantic analysis here so we dig deep into the
  // ready-made constraint expr and change the thing manually.
  ConceptSpecializationExpr *CSE;
  if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
    CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
  else
    CSE = cast<ConceptSpecializationExpr>(IDC);
  ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
  SmallVector<TemplateArgument, 3> NewConverted;
  NewConverted.reserve(OldConverted.size());
  if (OldConverted.front().getKind() == TemplateArgument::Pack) {
    // The case:
    // template<typename... T> concept C = true;
    // template<C<int> T> struct S; -> constraint is C<{T, int}>
    NewConverted.push_back(ConstrainedType);
    llvm::append_range(NewConverted,
                       OldConverted.front().pack_elements().drop_front(1));
    TemplateArgument NewPack(NewConverted);

    NewConverted.clear();
    NewConverted.push_back(NewPack);
    assert(OldConverted.size() == 1 &&
           "Template parameter pack should be the last parameter");
  } else {
    assert(OldConverted.front().getKind() == TemplateArgument::Type &&
           "Unexpected first argument kind for immediately-declared "
           "constraint");
    NewConverted.push_back(ConstrainedType);
    llvm::append_range(NewConverted, OldConverted.drop_front(1));
  }
  auto *CSD = ImplicitConceptSpecializationDecl::Create(
      C, CSE->getNamedConcept()->getDeclContext(),
      CSE->getNamedConcept()->getLocation(), NewConverted);

  Expr *NewIDC = ConceptSpecializationExpr::Create(
      C, CSE->getNamedConcept(), CSE->getTemplateArgsAsWritten(), CSD,
      /*Satisfaction=*/nullptr, CSE->isInstantiationDependent(),
      CSE->containsUnexpandedParameterPack());

  if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
    NewIDC = new (C) CXXFoldExpr(
        OrigFold->getType(), /*Callee*/ nullptr, SourceLocation(), NewIDC,
        BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
        SourceLocation(), /*NumExpansions=*/std::nullopt);
  return NewIDC;
}

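// For illustration of the rewrite above: the converted arguments of the
// constraint always start with the constrained type, so canonicalization
// substitutes the canonical parameter type for that first argument (or for
// the first element of the leading argument pack, as in the C<{T, int}>
// example in the comment) and leaves the remaining arguments alone. If the
// constrained parameter is itself a pack, the constraint expression is a
// fold expression, and the rewritten ConceptSpecializationExpr is wrapped
// back into a CXXFoldExpr over '&&'.
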
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), TTP->hasTypeConstraint(),
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      if (const auto *TC = TTP->getTypeConstraint()) {
        QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
        Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
            *this, TC->getImmediatelyDeclaredConstraint(),
            ParamAsArgument);
        NewTTP->setTypeConstraint(
            NestedNameSpecifierLoc(),
            DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
                                SourceLocation()), /*FoundDecl=*/nullptr,
            // Actually canonicalizing a TemplateArgumentLoc is difficult so we
            // simply omit the ArgsAsWritten.
            TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
      }
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      if (AutoType *AT = T->getContainedAutoType()) {
        if (AT->isConstrained()) {
          Param->setPlaceholderTypeConstraint(
              canonicalizeImmediatelyDeclaredConstraint(
                  *this, NTTP->getPlaceholderTypeConstraint(), T));
        }
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  Expr *CanonRequiresClause = nullptr;
  if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
    CanonRequiresClause = RequiresClause;

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       nullptr,
                                       TemplateParameterList::Create(
                                           *this, SourceLocation(),
                                           SourceLocation(),
                                           CanonParams,
                                           SourceLocation(),
                                           CanonRequiresClause));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

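// For illustration: the two template template parameters in
//
//   template <template <typename> class X> struct A;
//   template <template <typename T> class Y> struct B;
//
// profile identically (same depth, position, and parameter-list shape), so
// both map to a single canonical TemplateTemplateParmDecl, with no name and
// canonicalized constraints, owned by this ASTContext.
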
TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                       \
  if (counts[Idx])                                               \
    llvm::errs() << "    " << counts[Idx] << " " << #Name        \
                 << " types, " << sizeof(Name##Type) << " each " \
                 << "(" << counts[Idx] * sizeof(Name##Type)      \
                 << " bytes)\n";                                 \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase_value(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

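// For illustration of the ImportDecl special case above: if module A's only
// initializer is an "import B;" declaration, then adding an "import A;"
// initializer to the current module records A's own ImportDecl instead,
// collapsing chains of modules whose initializers merely re-import others.
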
void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

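// For illustration: the builders above synthesize implicit declarations with
// no source location, e.g. getInt128Decl() lazily creates the equivalent of
//
//   typedef __int128 __int128_t;
//
// at translation-unit scope, and buildImplicitRecord("_GUID") supplies the
// struct that Microsoft mode expects to predeclare for MSGuidDecls.
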
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  auto Pos = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
  if (Pos == InstantiatedFromUsingEnumDecl.end())
    return nullptr;

  return Pos->second;
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

InstantiatedFromUnnamedFieldDecl.end()) 1590 return nullptr; 1591 1592 return Pos->second; 1593 } 1594 1595 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1596 FieldDecl *Tmpl) { 1597 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1598 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1599 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1600 "Already noted what unnamed field was instantiated from"); 1601 1602 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1603 } 1604 1605 ASTContext::overridden_cxx_method_iterator 1606 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1607 return overridden_methods(Method).begin(); 1608 } 1609 1610 ASTContext::overridden_cxx_method_iterator 1611 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1612 return overridden_methods(Method).end(); 1613 } 1614 1615 unsigned 1616 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1617 auto Range = overridden_methods(Method); 1618 return Range.end() - Range.begin(); 1619 } 1620 1621 ASTContext::overridden_method_range 1622 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1623 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1624 OverriddenMethods.find(Method->getCanonicalDecl()); 1625 if (Pos == OverriddenMethods.end()) 1626 return overridden_method_range(nullptr, nullptr); 1627 return overridden_method_range(Pos->second.begin(), Pos->second.end()); 1628 } 1629 1630 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1631 const CXXMethodDecl *Overridden) { 1632 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1633 OverriddenMethods[Method].push_back(Overridden); 1634 } 1635 1636 void ASTContext::getOverriddenMethods( 1637 const NamedDecl *D, 1638 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1639 assert(D); 1640 1641 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1642 Overridden.append(overridden_methods_begin(CXXMethod), 1643 overridden_methods_end(CXXMethod)); 1644 return; 1645 } 1646 1647 const auto *Method = dyn_cast<ObjCMethodDecl>(D); 1648 if (!Method) 1649 return; 1650 1651 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1652 Method->getOverriddenMethods(OverDecls); 1653 Overridden.append(OverDecls.begin(), OverDecls.end()); 1654 } 1655 1656 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1657 assert(!Import->getNextLocalImport() && 1658 "Import declaration already in the chain"); 1659 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1660 if (!FirstLocalImport) { 1661 FirstLocalImport = Import; 1662 LastLocalImport = Import; 1663 return; 1664 } 1665 1666 LastLocalImport->setNextLocalImport(Import); 1667 LastLocalImport = Import; 1668 } 1669 1670 //===----------------------------------------------------------------------===// 1671 // Type Sizing and Analysis 1672 //===----------------------------------------------------------------------===// 1673 1674 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1675 /// scalar floating point type. 
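/// For example, on typical targets this returns llvm::APFloat::IEEEsingle()
/// for FloatTy and llvm::APFloat::IEEEdouble() for DoubleTy (illustrative
/// only; the exact semantics always come from the TargetInfo).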
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  switch (T->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a floating point type!");
  case BuiltinType::BFloat16:
    return Target->getBFloat16Format();
  case BuiltinType::Float16:
    return Target->getHalfFormat();
  case BuiltinType::Half:
    // For HLSL, when the native half type is disabled, half will be treated
    // as float.
    if (getLangOpts().HLSL)
      if (getLangOpts().NativeHalfType)
        return Target->getHalfFormat();
      else
        return Target->getFloatFormat();
    else
      return Target->getHalfFormat();
  case BuiltinType::Float:      return Target->getFloatFormat();
  case BuiltinType::Double:     return Target->getDoubleFormat();
  case BuiltinType::Ibm128:
    return Target->getIbm128Format();
  case BuiltinType::LongDouble:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
      return AuxTarget->getLongDoubleFormat();
    return Target->getLongDoubleFormat();
  case BuiltinType::Float128:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
      return AuxTarget->getFloat128Format();
    return Target->getFloat128Format();
  }
}

CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    UseAlignAttrOnly =
        D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
      if (const auto *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage() && !ForAlignof) {
          uint64_t TypeSize = getTypeSize(T.getTypePtr());
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
        }
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets place a hard limit on the maximum alignment that can be
  // requested with the aligned attribute on static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}

CharUnits ASTContext::getExnObjectAlignment() const {
  return toCharUnitsFromBits(Target->getExnObjectAlignment());
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  TypeInfoChars Info = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
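  //
  // Illustrative sketch (assumed non-POD class on a typical 64-bit target):
  //   struct B { B(); void *p; int i; };  // sizeof(B) == 16, data size == 12
  // A derived class may reuse B's 4 bytes of tail padding, so the assignment
  // memcpy for a B subobject copies 12 bytes rather than 16.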
1826 if (getLangOpts().CPlusPlus) { 1827 if (const auto *RT = T->getAs<RecordType>()) { 1828 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1829 Info.Width = layout.getDataSize(); 1830 } 1831 } 1832 1833 return Info; 1834 } 1835 1836 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1837 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1838 TypeInfoChars 1839 static getConstantArrayInfoInChars(const ASTContext &Context, 1840 const ConstantArrayType *CAT) { 1841 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1842 uint64_t Size = CAT->getSize().getZExtValue(); 1843 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1844 (uint64_t)(-1)/Size) && 1845 "Overflow in array type char size evaluation"); 1846 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1847 unsigned Align = EltInfo.Align.getQuantity(); 1848 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1849 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1850 Width = llvm::alignTo(Width, Align); 1851 return TypeInfoChars(CharUnits::fromQuantity(Width), 1852 CharUnits::fromQuantity(Align), 1853 EltInfo.AlignRequirement); 1854 } 1855 1856 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1857 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1858 return getConstantArrayInfoInChars(*this, CAT); 1859 TypeInfo Info = getTypeInfo(T); 1860 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1861 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1862 } 1863 1864 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1865 return getTypeInfoInChars(T.getTypePtr()); 1866 } 1867 1868 bool ASTContext::isPromotableIntegerType(QualType T) const { 1869 // HLSL doesn't promote all small integer types to int, it 1870 // just uses the rank-based promotion rules for all types. 1871 if (getLangOpts().HLSL) 1872 return false; 1873 1874 if (const auto *BT = T->getAs<BuiltinType>()) 1875 switch (BT->getKind()) { 1876 case BuiltinType::Bool: 1877 case BuiltinType::Char_S: 1878 case BuiltinType::Char_U: 1879 case BuiltinType::SChar: 1880 case BuiltinType::UChar: 1881 case BuiltinType::Short: 1882 case BuiltinType::UShort: 1883 case BuiltinType::WChar_S: 1884 case BuiltinType::WChar_U: 1885 case BuiltinType::Char8: 1886 case BuiltinType::Char16: 1887 case BuiltinType::Char32: 1888 return true; 1889 default: 1890 return false; 1891 } 1892 1893 // Enumerated types are promotable to their compatible integer types 1894 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). 1895 if (const auto *ET = T->getAs<EnumType>()) { 1896 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() || 1897 ET->getDecl()->isScoped()) 1898 return false; 1899 1900 return true; 1901 } 1902 1903 return false; 1904 } 1905 1906 bool ASTContext::isAlignmentRequired(const Type *T) const { 1907 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1908 } 1909 1910 bool ASTContext::isAlignmentRequired(QualType T) const { 1911 return isAlignmentRequired(T.getTypePtr()); 1912 } 1913 1914 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1915 bool NeedsPreferredAlignment) const { 1916 // An alignment on a typedef overrides anything else. 1917 if (const auto *TT = T->getAs<TypedefType>()) 1918 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1919 return Align; 1920 1921 // If we have an (array of) complete type, we're done. 
1922 T = getBaseElementType(T); 1923 if (!T->isIncompleteType()) 1924 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1925 1926 // If we had an array type, its element type might be a typedef 1927 // type with an alignment attribute. 1928 if (const auto *TT = T->getAs<TypedefType>()) 1929 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1930 return Align; 1931 1932 // Otherwise, see if the declaration of the type had an attribute. 1933 if (const auto *TT = T->getAs<TagType>()) 1934 return TT->getDecl()->getMaxAlignment(); 1935 1936 return 0; 1937 } 1938 1939 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1940 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1941 if (I != MemoizedTypeInfo.end()) 1942 return I->second; 1943 1944 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1945 TypeInfo TI = getTypeInfoImpl(T); 1946 MemoizedTypeInfo[T] = TI; 1947 return TI; 1948 } 1949 1950 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1951 /// method does not work on incomplete types. 1952 /// 1953 /// FIXME: Pointers into different addr spaces could have different sizes and 1954 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1955 /// should take a QualType, &c. 1956 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1957 uint64_t Width = 0; 1958 unsigned Align = 8; 1959 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1960 LangAS AS = LangAS::Default; 1961 switch (T->getTypeClass()) { 1962 #define TYPE(Class, Base) 1963 #define ABSTRACT_TYPE(Class, Base) 1964 #define NON_CANONICAL_TYPE(Class, Base) 1965 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1966 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1967 case Type::Class: \ 1968 assert(!T->isDependentType() && "should not see dependent types here"); \ 1969 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1970 #include "clang/AST/TypeNodes.inc" 1971 llvm_unreachable("Should not see dependent types"); 1972 1973 case Type::FunctionNoProto: 1974 case Type::FunctionProto: 1975 // GCC extension: alignof(function) = 32 bits 1976 Width = 0; 1977 Align = 32; 1978 break; 1979 1980 case Type::IncompleteArray: 1981 case Type::VariableArray: 1982 case Type::ConstantArray: { 1983 // Model non-constant sized arrays as size zero, but track the alignment. 1984 uint64_t Size = 0; 1985 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1986 Size = CAT->getSize().getZExtValue(); 1987 1988 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1989 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1990 "Overflow in array type bit size evaluation"); 1991 Width = EltInfo.Width * Size; 1992 Align = EltInfo.Align; 1993 AlignRequirement = EltInfo.AlignRequirement; 1994 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1995 getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1996 Width = llvm::alignTo(Width, Align); 1997 break; 1998 } 1999 2000 case Type::ExtVector: 2001 case Type::Vector: { 2002 const auto *VT = cast<VectorType>(T); 2003 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 2004 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 2005 : EltInfo.Width * VT->getNumElements(); 2006 // Enforce at least byte alignment. 2007 Align = std::max<unsigned>(8, Width); 2008 2009 // If the alignment is not a power of 2, round up to the next power of 2. 2010 // This happens for non-power-of-2 length vectors. 
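    // For example, a vector of three 32-bit floats has Width == 96; since 96
    // is not a power of 2, Align is rounded up to 128 and Width is padded to
    // match (illustrative application of the generic rule below).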
2011 if (Align & (Align-1)) { 2012 Align = llvm::NextPowerOf2(Align); 2013 Width = llvm::alignTo(Width, Align); 2014 } 2015 // Adjust the alignment based on the target max. 2016 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 2017 if (TargetVectorAlign && TargetVectorAlign < Align) 2018 Align = TargetVectorAlign; 2019 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 2020 // Adjust the alignment for fixed-length SVE vectors. This is important 2021 // for non-power-of-2 vector lengths. 2022 Align = 128; 2023 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 2024 // Adjust the alignment for fixed-length SVE predicates. 2025 Align = 16; 2026 break; 2027 } 2028 2029 case Type::ConstantMatrix: { 2030 const auto *MT = cast<ConstantMatrixType>(T); 2031 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 2032 // The internal layout of a matrix value is implementation defined. 2033 // Initially be ABI compatible with arrays with respect to alignment and 2034 // size. 2035 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 2036 Align = ElementInfo.Align; 2037 break; 2038 } 2039 2040 case Type::Builtin: 2041 switch (cast<BuiltinType>(T)->getKind()) { 2042 default: llvm_unreachable("Unknown builtin type!"); 2043 case BuiltinType::Void: 2044 // GCC extension: alignof(void) = 8 bits. 2045 Width = 0; 2046 Align = 8; 2047 break; 2048 case BuiltinType::Bool: 2049 Width = Target->getBoolWidth(); 2050 Align = Target->getBoolAlign(); 2051 break; 2052 case BuiltinType::Char_S: 2053 case BuiltinType::Char_U: 2054 case BuiltinType::UChar: 2055 case BuiltinType::SChar: 2056 case BuiltinType::Char8: 2057 Width = Target->getCharWidth(); 2058 Align = Target->getCharAlign(); 2059 break; 2060 case BuiltinType::WChar_S: 2061 case BuiltinType::WChar_U: 2062 Width = Target->getWCharWidth(); 2063 Align = Target->getWCharAlign(); 2064 break; 2065 case BuiltinType::Char16: 2066 Width = Target->getChar16Width(); 2067 Align = Target->getChar16Align(); 2068 break; 2069 case BuiltinType::Char32: 2070 Width = Target->getChar32Width(); 2071 Align = Target->getChar32Align(); 2072 break; 2073 case BuiltinType::UShort: 2074 case BuiltinType::Short: 2075 Width = Target->getShortWidth(); 2076 Align = Target->getShortAlign(); 2077 break; 2078 case BuiltinType::UInt: 2079 case BuiltinType::Int: 2080 Width = Target->getIntWidth(); 2081 Align = Target->getIntAlign(); 2082 break; 2083 case BuiltinType::ULong: 2084 case BuiltinType::Long: 2085 Width = Target->getLongWidth(); 2086 Align = Target->getLongAlign(); 2087 break; 2088 case BuiltinType::ULongLong: 2089 case BuiltinType::LongLong: 2090 Width = Target->getLongLongWidth(); 2091 Align = Target->getLongLongAlign(); 2092 break; 2093 case BuiltinType::Int128: 2094 case BuiltinType::UInt128: 2095 Width = 128; 2096 Align = Target->getInt128Align(); 2097 break; 2098 case BuiltinType::ShortAccum: 2099 case BuiltinType::UShortAccum: 2100 case BuiltinType::SatShortAccum: 2101 case BuiltinType::SatUShortAccum: 2102 Width = Target->getShortAccumWidth(); 2103 Align = Target->getShortAccumAlign(); 2104 break; 2105 case BuiltinType::Accum: 2106 case BuiltinType::UAccum: 2107 case BuiltinType::SatAccum: 2108 case BuiltinType::SatUAccum: 2109 Width = Target->getAccumWidth(); 2110 Align = Target->getAccumAlign(); 2111 break; 2112 case BuiltinType::LongAccum: 2113 case BuiltinType::ULongAccum: 2114 case BuiltinType::SatLongAccum: 2115 case BuiltinType::SatULongAccum: 2116 Width = Target->getLongAccumWidth(); 2117 Align = 
Target->getLongAccumAlign(); 2118 break; 2119 case BuiltinType::ShortFract: 2120 case BuiltinType::UShortFract: 2121 case BuiltinType::SatShortFract: 2122 case BuiltinType::SatUShortFract: 2123 Width = Target->getShortFractWidth(); 2124 Align = Target->getShortFractAlign(); 2125 break; 2126 case BuiltinType::Fract: 2127 case BuiltinType::UFract: 2128 case BuiltinType::SatFract: 2129 case BuiltinType::SatUFract: 2130 Width = Target->getFractWidth(); 2131 Align = Target->getFractAlign(); 2132 break; 2133 case BuiltinType::LongFract: 2134 case BuiltinType::ULongFract: 2135 case BuiltinType::SatLongFract: 2136 case BuiltinType::SatULongFract: 2137 Width = Target->getLongFractWidth(); 2138 Align = Target->getLongFractAlign(); 2139 break; 2140 case BuiltinType::BFloat16: 2141 if (Target->hasBFloat16Type()) { 2142 Width = Target->getBFloat16Width(); 2143 Align = Target->getBFloat16Align(); 2144 } 2145 break; 2146 case BuiltinType::Float16: 2147 case BuiltinType::Half: 2148 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2149 !getLangOpts().OpenMPIsDevice) { 2150 Width = Target->getHalfWidth(); 2151 Align = Target->getHalfAlign(); 2152 } else { 2153 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2154 "Expected OpenMP device compilation."); 2155 Width = AuxTarget->getHalfWidth(); 2156 Align = AuxTarget->getHalfAlign(); 2157 } 2158 break; 2159 case BuiltinType::Float: 2160 Width = Target->getFloatWidth(); 2161 Align = Target->getFloatAlign(); 2162 break; 2163 case BuiltinType::Double: 2164 Width = Target->getDoubleWidth(); 2165 Align = Target->getDoubleAlign(); 2166 break; 2167 case BuiltinType::Ibm128: 2168 Width = Target->getIbm128Width(); 2169 Align = Target->getIbm128Align(); 2170 break; 2171 case BuiltinType::LongDouble: 2172 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2173 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2174 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2175 Width = AuxTarget->getLongDoubleWidth(); 2176 Align = AuxTarget->getLongDoubleAlign(); 2177 } else { 2178 Width = Target->getLongDoubleWidth(); 2179 Align = Target->getLongDoubleAlign(); 2180 } 2181 break; 2182 case BuiltinType::Float128: 2183 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2184 !getLangOpts().OpenMPIsDevice) { 2185 Width = Target->getFloat128Width(); 2186 Align = Target->getFloat128Align(); 2187 } else { 2188 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2189 "Expected OpenMP device compilation."); 2190 Width = AuxTarget->getFloat128Width(); 2191 Align = AuxTarget->getFloat128Align(); 2192 } 2193 break; 2194 case BuiltinType::NullPtr: 2195 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*) 2196 Width = Target->getPointerWidth(LangAS::Default); 2197 Align = Target->getPointerAlign(LangAS::Default); 2198 break; 2199 case BuiltinType::ObjCId: 2200 case BuiltinType::ObjCClass: 2201 case BuiltinType::ObjCSel: 2202 Width = Target->getPointerWidth(LangAS::Default); 2203 Align = Target->getPointerAlign(LangAS::Default); 2204 break; 2205 case BuiltinType::OCLSampler: 2206 case BuiltinType::OCLEvent: 2207 case BuiltinType::OCLClkEvent: 2208 case BuiltinType::OCLQueue: 2209 case BuiltinType::OCLReserveID: 2210 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2211 case BuiltinType::Id: 2212 #include "clang/Basic/OpenCLImageTypes.def" 2213 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2214 case BuiltinType::Id: 2215 #include "clang/Basic/OpenCLExtensionTypes.def" 2216 AS = 
Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 2217 Width = Target->getPointerWidth(AS); 2218 Align = Target->getPointerAlign(AS); 2219 break; 2220 // The SVE types are effectively target-specific. The length of an 2221 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2222 // of 128 bits. There is one predicate bit for each vector byte, so the 2223 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2224 // 2225 // Because the length is only known at runtime, we use a dummy value 2226 // of 0 for the static length. The alignment values are those defined 2227 // by the Procedure Call Standard for the Arm Architecture. 2228 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2229 IsSigned, IsFP, IsBF) \ 2230 case BuiltinType::Id: \ 2231 Width = 0; \ 2232 Align = 128; \ 2233 break; 2234 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2235 case BuiltinType::Id: \ 2236 Width = 0; \ 2237 Align = 16; \ 2238 break; 2239 #include "clang/Basic/AArch64SVEACLETypes.def" 2240 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2241 case BuiltinType::Id: \ 2242 Width = Size; \ 2243 Align = Size; \ 2244 break; 2245 #include "clang/Basic/PPCTypes.def" 2246 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2247 IsFP) \ 2248 case BuiltinType::Id: \ 2249 Width = 0; \ 2250 Align = ElBits; \ 2251 break; 2252 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2253 case BuiltinType::Id: \ 2254 Width = 0; \ 2255 Align = 8; \ 2256 break; 2257 #include "clang/Basic/RISCVVTypes.def" 2258 } 2259 break; 2260 case Type::ObjCObjectPointer: 2261 Width = Target->getPointerWidth(LangAS::Default); 2262 Align = Target->getPointerAlign(LangAS::Default); 2263 break; 2264 case Type::BlockPointer: 2265 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace(); 2266 Width = Target->getPointerWidth(AS); 2267 Align = Target->getPointerAlign(AS); 2268 break; 2269 case Type::LValueReference: 2270 case Type::RValueReference: 2271 // alignof and sizeof should never enter this code path here, so we go 2272 // the pointer route. 2273 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace(); 2274 Width = Target->getPointerWidth(AS); 2275 Align = Target->getPointerAlign(AS); 2276 break; 2277 case Type::Pointer: 2278 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace(); 2279 Width = Target->getPointerWidth(AS); 2280 Align = Target->getPointerAlign(AS); 2281 break; 2282 case Type::MemberPointer: { 2283 const auto *MPT = cast<MemberPointerType>(T); 2284 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2285 Width = MPI.Width; 2286 Align = MPI.Align; 2287 break; 2288 } 2289 case Type::Complex: { 2290 // Complex types have the same alignment as their elements, but twice the 2291 // size. 
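    // (e.g. _Complex double is typically 128 bits wide with 64-bit alignment.)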
2292 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2293 Width = EltInfo.Width * 2; 2294 Align = EltInfo.Align; 2295 break; 2296 } 2297 case Type::ObjCObject: 2298 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2299 case Type::Adjusted: 2300 case Type::Decayed: 2301 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2302 case Type::ObjCInterface: { 2303 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2304 if (ObjCI->getDecl()->isInvalidDecl()) { 2305 Width = 8; 2306 Align = 8; 2307 break; 2308 } 2309 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2310 Width = toBits(Layout.getSize()); 2311 Align = toBits(Layout.getAlignment()); 2312 break; 2313 } 2314 case Type::BitInt: { 2315 const auto *EIT = cast<BitIntType>(T); 2316 Align = 2317 std::min(static_cast<unsigned>(std::max( 2318 getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), 2319 Target->getLongLongAlign()); 2320 Width = llvm::alignTo(EIT->getNumBits(), Align); 2321 break; 2322 } 2323 case Type::Record: 2324 case Type::Enum: { 2325 const auto *TT = cast<TagType>(T); 2326 2327 if (TT->getDecl()->isInvalidDecl()) { 2328 Width = 8; 2329 Align = 8; 2330 break; 2331 } 2332 2333 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2334 const EnumDecl *ED = ET->getDecl(); 2335 TypeInfo Info = 2336 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2337 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2338 Info.Align = AttrAlign; 2339 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2340 } 2341 return Info; 2342 } 2343 2344 const auto *RT = cast<RecordType>(TT); 2345 const RecordDecl *RD = RT->getDecl(); 2346 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2347 Width = toBits(Layout.getSize()); 2348 Align = toBits(Layout.getAlignment()); 2349 AlignRequirement = RD->hasAttr<AlignedAttr>() 2350 ? AlignRequirementKind::RequiredByRecord 2351 : AlignRequirementKind::None; 2352 break; 2353 } 2354 2355 case Type::SubstTemplateTypeParm: 2356 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2357 getReplacementType().getTypePtr()); 2358 2359 case Type::Auto: 2360 case Type::DeducedTemplateSpecialization: { 2361 const auto *A = cast<DeducedType>(T); 2362 assert(!A->getDeducedType().isNull() && 2363 "cannot request the size of an undeduced or dependent auto type"); 2364 return getTypeInfo(A->getDeducedType().getTypePtr()); 2365 } 2366 2367 case Type::Paren: 2368 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2369 2370 case Type::MacroQualified: 2371 return getTypeInfo( 2372 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2373 2374 case Type::ObjCTypeParam: 2375 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2376 2377 case Type::Using: 2378 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2379 2380 case Type::Typedef: { 2381 const auto *TT = cast<TypedefType>(T); 2382 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr()); 2383 // If the typedef has an aligned attribute on it, it overrides any computed 2384 // alignment we have. This violates the GCC documentation (which says that 2385 // attribute(aligned) can only round up) but matches its implementation. 
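    // Illustrative (hypothetical) example:
    //   typedef double LowAlignDouble __attribute__((aligned(2)));
    // Here alignof(LowAlignDouble) becomes 2 even though the computed
    // alignment of double is larger.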
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());

  case Type::Attributed:
    return getTypeInfo(
        cast<AttributedType>(T)->getEquivalentType().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      if (!llvm::isPowerOf2_64(Width))
        Width = llvm::NextPowerOf2(Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::Pipe:
    Width = Target->getPointerWidth(LangAS::opencl_global);
    Align = Target->getPointerAlign(LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}

unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}

unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}

/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
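/// (For instance, on a typical 64-bit target getTypeSizeInChars(VoidPtrTy)
/// yields CharUnits::fromQuantity(8); illustrative only.)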
2482 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2483 return getTypeInfoInChars(T).Width; 2484 } 2485 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2486 return getTypeInfoInChars(T).Width; 2487 } 2488 2489 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2490 /// characters. This method does not work on incomplete types. 2491 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2492 return toCharUnitsFromBits(getTypeAlign(T)); 2493 } 2494 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2495 return toCharUnitsFromBits(getTypeAlign(T)); 2496 } 2497 2498 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2499 /// type, in characters, before alignment adjustments. This method does 2500 /// not work on incomplete types. 2501 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2502 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2503 } 2504 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2505 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2506 } 2507 2508 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2509 /// type for the current target in bits. This can be different than the ABI 2510 /// alignment in cases where it is beneficial for performance or backwards 2511 /// compatibility preserving to overalign a data type. (Note: despite the name, 2512 /// the preferred alignment is ABI-impacting, and not an optimization.) 2513 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { 2514 TypeInfo TI = getTypeInfo(T); 2515 unsigned ABIAlign = TI.Align; 2516 2517 T = T->getBaseElementTypeUnsafe(); 2518 2519 // The preferred alignment of member pointers is that of a pointer. 2520 if (T->isMemberPointerType()) 2521 return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); 2522 2523 if (!Target->allowsLargerPreferedTypeAlignment()) 2524 return ABIAlign; 2525 2526 if (const auto *RT = T->getAs<RecordType>()) { 2527 const RecordDecl *RD = RT->getDecl(); 2528 2529 // When used as part of a typedef, or together with a 'packed' attribute, 2530 // the 'aligned' attribute can be used to decrease alignment. Note that the 2531 // 'packed' case is already taken into consideration when computing the 2532 // alignment, we only need to handle the typedef case here. 2533 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef || 2534 RD->isInvalidDecl()) 2535 return ABIAlign; 2536 2537 unsigned PreferredAlign = static_cast<unsigned>( 2538 toBits(getASTRecordLayout(RD).PreferredAlignment)); 2539 assert(PreferredAlign >= ABIAlign && 2540 "PreferredAlign should be at least as large as ABIAlign."); 2541 return PreferredAlign; 2542 } 2543 2544 // Double (and, for targets supporting AIX `power` alignment, long double) and 2545 // long long should be naturally aligned (despite requiring less alignment) if 2546 // possible. 2547 if (const auto *CT = T->getAs<ComplexType>()) 2548 T = CT->getElementType().getTypePtr(); 2549 if (const auto *ET = T->getAs<EnumType>()) 2550 T = ET->getDecl()->getIntegerType().getTypePtr(); 2551 if (T->isSpecificBuiltinType(BuiltinType::Double) || 2552 T->isSpecificBuiltinType(BuiltinType::LongLong) || 2553 T->isSpecificBuiltinType(BuiltinType::ULongLong) || 2554 (T->isSpecificBuiltinType(BuiltinType::LongDouble) && 2555 Target->defaultsToAIXPowerAlignment())) 2556 // Don't increase the alignment if an alignment attribute was specified on a 2557 // typedef declaration. 
2558 if (!TI.isAlignRequired()) 2559 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2560 2561 return ABIAlign; 2562 } 2563 2564 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2565 /// for __attribute__((aligned)) on this target, to be used if no alignment 2566 /// value is specified. 2567 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2568 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2569 } 2570 2571 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2572 /// to a global variable of the specified type. 2573 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2574 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2575 return std::max(getPreferredTypeAlign(T), 2576 getTargetInfo().getMinGlobalAlign(TypeSize)); 2577 } 2578 2579 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2580 /// should be given to a global variable of the specified type. 2581 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2582 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2583 } 2584 2585 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2586 CharUnits Offset = CharUnits::Zero(); 2587 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2588 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2589 Offset += Layout->getBaseClassOffset(Base); 2590 Layout = &getASTRecordLayout(Base); 2591 } 2592 return Offset; 2593 } 2594 2595 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2596 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2597 CharUnits ThisAdjustment = CharUnits::Zero(); 2598 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2599 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2600 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2601 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2602 const CXXRecordDecl *Base = RD; 2603 const CXXRecordDecl *Derived = Path[I]; 2604 if (DerivedMember) 2605 std::swap(Base, Derived); 2606 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2607 RD = Path[I]; 2608 } 2609 if (DerivedMember) 2610 ThisAdjustment = -ThisAdjustment; 2611 return ThisAdjustment; 2612 } 2613 2614 /// DeepCollectObjCIvars - 2615 /// This routine first collects all declared, but not synthesized, ivars in 2616 /// super class and then collects all ivars, including those synthesized for 2617 /// current class. This routine is used for implementation of current class 2618 /// when all ivars, declared and synthesized are known. 2619 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2620 bool leafClass, 2621 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2622 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2623 DeepCollectObjCIvars(SuperClass, false, Ivars); 2624 if (!leafClass) { 2625 llvm::append_range(Ivars, OI->ivars()); 2626 } else { 2627 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2628 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2629 Iv= Iv->getNextIvar()) 2630 Ivars.push_back(Iv); 2631 } 2632 } 2633 2634 /// CollectInheritedProtocols - Collect all protocols in current class and 2635 /// those inherited by it. 
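/// For example, for an interface declared as '@interface I : Super <P1>' with
/// a visible category adopting P2, the collected set includes P1, P2, the
/// protocols adopted by Super, and, recursively, everything those protocols
/// inherit (an illustrative summary of the walk below).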
2636 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2637 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2638 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2639 // We can use protocol_iterator here instead of 2640 // all_referenced_protocol_iterator since we are walking all categories. 2641 for (auto *Proto : OI->all_referenced_protocols()) { 2642 CollectInheritedProtocols(Proto, Protocols); 2643 } 2644 2645 // Categories of this Interface. 2646 for (const auto *Cat : OI->visible_categories()) 2647 CollectInheritedProtocols(Cat, Protocols); 2648 2649 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2650 while (SD) { 2651 CollectInheritedProtocols(SD, Protocols); 2652 SD = SD->getSuperClass(); 2653 } 2654 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2655 for (auto *Proto : OC->protocols()) { 2656 CollectInheritedProtocols(Proto, Protocols); 2657 } 2658 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2659 // Insert the protocol. 2660 if (!Protocols.insert( 2661 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2662 return; 2663 2664 for (auto *Proto : OP->protocols()) 2665 CollectInheritedProtocols(Proto, Protocols); 2666 } 2667 } 2668 2669 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2670 const RecordDecl *RD) { 2671 assert(RD->isUnion() && "Must be union type"); 2672 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2673 2674 for (const auto *Field : RD->fields()) { 2675 if (!Context.hasUniqueObjectRepresentations(Field->getType())) 2676 return false; 2677 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2678 if (FieldSize != UnionSize) 2679 return false; 2680 } 2681 return !RD->field_empty(); 2682 } 2683 2684 static int64_t getSubobjectOffset(const FieldDecl *Field, 2685 const ASTContext &Context, 2686 const clang::ASTRecordLayout & /*Layout*/) { 2687 return Context.getFieldOffset(Field); 2688 } 2689 2690 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2691 const ASTContext &Context, 2692 const clang::ASTRecordLayout &Layout) { 2693 return Context.toBits(Layout.getBaseClassOffset(RD)); 2694 } 2695 2696 static std::optional<int64_t> 2697 structHasUniqueObjectRepresentations(const ASTContext &Context, 2698 const RecordDecl *RD); 2699 2700 static std::optional<int64_t> 2701 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) { 2702 if (Field->getType()->isRecordType()) { 2703 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2704 if (!RD->isUnion()) 2705 return structHasUniqueObjectRepresentations(Context, RD); 2706 } 2707 2708 // A _BitInt type may not be unique if it has padding bits 2709 // but if it is a bitfield the padding bits are not used. 
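  // For instance, assuming a target that stores _BitInt(17) in 32 bits, a
  // plain _BitInt(17) member carries 15 padding bits, whereas a bit-field
  // declared as '_BitInt(17) f : 17;' uses only its 17 value bits.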
2710 bool IsBitIntType = Field->getType()->isBitIntType(); 2711 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2712 !Context.hasUniqueObjectRepresentations(Field->getType())) 2713 return std::nullopt; 2714 2715 int64_t FieldSizeInBits = 2716 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2717 if (Field->isBitField()) { 2718 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2719 if (IsBitIntType) { 2720 if ((unsigned)BitfieldSize > 2721 cast<BitIntType>(Field->getType())->getNumBits()) 2722 return std::nullopt; 2723 } else if (BitfieldSize > FieldSizeInBits) { 2724 return std::nullopt; 2725 } 2726 FieldSizeInBits = BitfieldSize; 2727 } else if (IsBitIntType && 2728 !Context.hasUniqueObjectRepresentations(Field->getType())) { 2729 return std::nullopt; 2730 } 2731 return FieldSizeInBits; 2732 } 2733 2734 static std::optional<int64_t> 2735 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) { 2736 return structHasUniqueObjectRepresentations(Context, RD); 2737 } 2738 2739 template <typename RangeT> 2740 static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2741 const RangeT &Subobjects, int64_t CurOffsetInBits, 2742 const ASTContext &Context, const clang::ASTRecordLayout &Layout) { 2743 for (const auto *Subobject : Subobjects) { 2744 std::optional<int64_t> SizeInBits = 2745 getSubobjectSizeInBits(Subobject, Context); 2746 if (!SizeInBits) 2747 return std::nullopt; 2748 if (*SizeInBits != 0) { 2749 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2750 if (Offset != CurOffsetInBits) 2751 return std::nullopt; 2752 CurOffsetInBits += *SizeInBits; 2753 } 2754 } 2755 return CurOffsetInBits; 2756 } 2757 2758 static std::optional<int64_t> 2759 structHasUniqueObjectRepresentations(const ASTContext &Context, 2760 const RecordDecl *RD) { 2761 assert(!RD->isUnion() && "Must be struct/class type"); 2762 const auto &Layout = Context.getASTRecordLayout(RD); 2763 2764 int64_t CurOffsetInBits = 0; 2765 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2766 if (ClassDecl->isDynamicClass()) 2767 return std::nullopt; 2768 2769 SmallVector<CXXRecordDecl *, 4> Bases; 2770 for (const auto &Base : ClassDecl->bases()) { 2771 // Empty types can be inherited from, and non-empty types can potentially 2772 // have tail padding, so just make sure there isn't an error. 
2773 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2774 } 2775 2776 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2777 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2778 }); 2779 2780 std::optional<int64_t> OffsetAfterBases = 2781 structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits, 2782 Context, Layout); 2783 if (!OffsetAfterBases) 2784 return std::nullopt; 2785 CurOffsetInBits = *OffsetAfterBases; 2786 } 2787 2788 std::optional<int64_t> OffsetAfterFields = 2789 structSubobjectsHaveUniqueObjectRepresentations( 2790 RD->fields(), CurOffsetInBits, Context, Layout); 2791 if (!OffsetAfterFields) 2792 return std::nullopt; 2793 CurOffsetInBits = *OffsetAfterFields; 2794 2795 return CurOffsetInBits; 2796 } 2797 2798 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { 2799 // C++17 [meta.unary.prop]: 2800 // The predicate condition for a template specialization 2801 // has_unique_object_representations<T> shall be 2802 // satisfied if and only if: 2803 // (9.1) - T is trivially copyable, and 2804 // (9.2) - any two objects of type T with the same value have the same 2805 // object representation, where two objects 2806 // of array or non-union class type are considered to have the same value 2807 // if their respective sequences of 2808 // direct subobjects have the same values, and two objects of union type 2809 // are considered to have the same 2810 // value if they have the same active member and the corresponding members 2811 // have the same value. 2812 // The set of scalar types for which this condition holds is 2813 // implementation-defined. [ Note: If a type has padding 2814 // bits, the condition does not hold; otherwise, the condition holds true 2815 // for unsigned integral types. -- end note ] 2816 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2817 2818 // Arrays are unique only if their element type is unique. 2819 if (Ty->isArrayType()) 2820 return hasUniqueObjectRepresentations(getBaseElementType(Ty)); 2821 2822 // (9.1) - T is trivially copyable... 2823 if (!Ty.isTriviallyCopyableType(*this)) 2824 return false; 2825 2826 // All integrals and enums are unique. 2827 if (Ty->isIntegralOrEnumerationType()) { 2828 // Except _BitInt types that have padding bits. 2829 if (const auto *BIT = dyn_cast<BitIntType>(Ty)) 2830 return getTypeSize(BIT) == BIT->getNumBits(); 2831 2832 return true; 2833 } 2834 2835 // All other pointers are unique. 
2836 if (Ty->isPointerType()) 2837 return true; 2838 2839 if (Ty->isMemberPointerType()) { 2840 const auto *MPT = Ty->getAs<MemberPointerType>(); 2841 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2842 } 2843 2844 if (Ty->isRecordType()) { 2845 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2846 2847 if (Record->isInvalidDecl()) 2848 return false; 2849 2850 if (Record->isUnion()) 2851 return unionHasUniqueObjectRepresentations(*this, Record); 2852 2853 std::optional<int64_t> StructSize = 2854 structHasUniqueObjectRepresentations(*this, Record); 2855 2856 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2857 } 2858 2859 // FIXME: More cases to handle here (list by rsmith): 2860 // vectors (careful about, eg, vector of 3 foo) 2861 // _Complex int and friends 2862 // _Atomic T 2863 // Obj-C block pointers 2864 // Obj-C object pointers 2865 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2866 // clk_event_t, queue_t, reserve_id_t) 2867 // There're also Obj-C class types and the Obj-C selector type, but I think it 2868 // makes sense for those to return false here. 2869 2870 return false; 2871 } 2872 2873 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2874 unsigned count = 0; 2875 // Count ivars declared in class extension. 2876 for (const auto *Ext : OI->known_extensions()) 2877 count += Ext->ivar_size(); 2878 2879 // Count ivar defined in this class's implementation. This 2880 // includes synthesized ivars. 2881 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2882 count += ImplDecl->ivar_size(); 2883 2884 return count; 2885 } 2886 2887 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2888 if (!E) 2889 return false; 2890 2891 // nullptr_t is always treated as null. 2892 if (E->getType()->isNullPtrType()) return true; 2893 2894 if (E->getType()->isAnyPointerType() && 2895 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2896 Expr::NPC_ValueDependentIsNull)) 2897 return true; 2898 2899 // Unfortunately, __null has type 'int'. 2900 if (isa<GNUNullExpr>(E)) return true; 2901 2902 return false; 2903 } 2904 2905 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2906 /// exists. 2907 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2908 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2909 I = ObjCImpls.find(D); 2910 if (I != ObjCImpls.end()) 2911 return cast<ObjCImplementationDecl>(I->second); 2912 return nullptr; 2913 } 2914 2915 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2916 /// exists. 2917 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2918 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2919 I = ObjCImpls.find(D); 2920 if (I != ObjCImpls.end()) 2921 return cast<ObjCCategoryImplDecl>(I->second); 2922 return nullptr; 2923 } 2924 2925 /// Set the implementation of ObjCInterfaceDecl. 2926 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2927 ObjCImplementationDecl *ImplD) { 2928 assert(IFaceD && ImplD && "Passed null params"); 2929 ObjCImpls[IFaceD] = ImplD; 2930 } 2931 2932 /// Set the implementation of ObjCCategoryDecl. 
2933 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2934 ObjCCategoryImplDecl *ImplD) { 2935 assert(CatD && ImplD && "Passed null params"); 2936 ObjCImpls[CatD] = ImplD; 2937 } 2938 2939 const ObjCMethodDecl * 2940 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2941 return ObjCMethodRedecls.lookup(MD); 2942 } 2943 2944 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2945 const ObjCMethodDecl *Redecl) { 2946 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2947 ObjCMethodRedecls[MD] = Redecl; 2948 } 2949 2950 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2951 const NamedDecl *ND) const { 2952 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2953 return ID; 2954 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2955 return CD->getClassInterface(); 2956 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2957 return IMD->getClassInterface(); 2958 2959 return nullptr; 2960 } 2961 2962 /// Get the copy initialization expression of VarDecl, or nullptr if 2963 /// none exists. 2964 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2965 assert(VD && "Passed null params"); 2966 assert(VD->hasAttr<BlocksAttr>() && 2967 "getBlockVarCopyInits - not __block var"); 2968 auto I = BlockVarCopyInits.find(VD); 2969 if (I != BlockVarCopyInits.end()) 2970 return I->second; 2971 return {nullptr, false}; 2972 } 2973 2974 /// Set the copy initialization expression of a block var decl. 2975 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2976 bool CanThrow) { 2977 assert(VD && CopyExpr && "Passed null params"); 2978 assert(VD->hasAttr<BlocksAttr>() && 2979 "setBlockVarCopyInits - not __block var"); 2980 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2981 } 2982 2983 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2984 unsigned DataSize) const { 2985 if (!DataSize) 2986 DataSize = TypeLoc::getFullDataSizeForType(T); 2987 else 2988 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2989 "incorrect data size provided to CreateTypeSourceInfo!"); 2990 2991 auto *TInfo = 2992 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2993 new (TInfo) TypeSourceInfo(T); 2994 return TInfo; 2995 } 2996 2997 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2998 SourceLocation L) const { 2999 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 3000 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 3001 return DI; 3002 } 3003 3004 const ASTRecordLayout & 3005 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 3006 return getObjCLayout(D, nullptr); 3007 } 3008 3009 const ASTRecordLayout & 3010 ASTContext::getASTObjCImplementationLayout( 3011 const ObjCImplementationDecl *D) const { 3012 return getObjCLayout(D->getClassInterface(), D); 3013 } 3014 3015 static auto getCanonicalTemplateArguments(const ASTContext &C, 3016 ArrayRef<TemplateArgument> Args, 3017 bool &AnyNonCanonArgs) { 3018 SmallVector<TemplateArgument, 16> CanonArgs(Args); 3019 for (auto &Arg : CanonArgs) { 3020 TemplateArgument OrigArg = Arg; 3021 Arg = C.getCanonicalTemplateArgument(Arg); 3022 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); 3023 } 3024 return CanonArgs; 3025 } 3026 3027 //===----------------------------------------------------------------------===// 3028 // Type creation/memoization methods 3029 
//===----------------------------------------------------------------------===// 3030 3031 QualType 3032 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 3033 unsigned fastQuals = quals.getFastQualifiers(); 3034 quals.removeFastQualifiers(); 3035 3036 // Check if we've already instantiated this type. 3037 llvm::FoldingSetNodeID ID; 3038 ExtQuals::Profile(ID, baseType, quals); 3039 void *insertPos = nullptr; 3040 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 3041 assert(eq->getQualifiers() == quals); 3042 return QualType(eq, fastQuals); 3043 } 3044 3045 // If the base type is not canonical, make the appropriate canonical type. 3046 QualType canon; 3047 if (!baseType->isCanonicalUnqualified()) { 3048 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 3049 canonSplit.Quals.addConsistentQualifiers(quals); 3050 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3051 3052 // Re-find the insert position. 3053 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3054 } 3055 3056 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 3057 ExtQualNodes.InsertNode(eq, insertPos); 3058 return QualType(eq, fastQuals); 3059 } 3060 3061 QualType ASTContext::getAddrSpaceQualType(QualType T, 3062 LangAS AddressSpace) const { 3063 QualType CanT = getCanonicalType(T); 3064 if (CanT.getAddressSpace() == AddressSpace) 3065 return T; 3066 3067 // If we are composing extended qualifiers together, merge together 3068 // into one ExtQuals node. 3069 QualifierCollector Quals; 3070 const Type *TypeNode = Quals.strip(T); 3071 3072 // If this type already has an address space specified, it cannot get 3073 // another one. 3074 assert(!Quals.hasAddressSpace() && 3075 "Type cannot be in multiple addr spaces!"); 3076 Quals.addAddressSpace(AddressSpace); 3077 3078 return getExtQualType(TypeNode, Quals); 3079 } 3080 3081 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3082 // If the type is not qualified with an address space, just return it 3083 // immediately. 3084 if (!T.hasAddressSpace()) 3085 return T; 3086 3087 // If we are composing extended qualifiers together, merge together 3088 // into one ExtQuals node. 3089 QualifierCollector Quals; 3090 const Type *TypeNode; 3091 3092 while (T.hasAddressSpace()) { 3093 TypeNode = Quals.strip(T); 3094 3095 // If the type no longer has an address space after stripping qualifiers, 3096 // jump out. 3097 if (!QualType(TypeNode, 0).hasAddressSpace()) 3098 break; 3099 3100 // There might be sugar in the way. Strip it and try again. 3101 T = T.getSingleStepDesugaredType(*this); 3102 } 3103 3104 Quals.removeAddressSpace(); 3105 3106 // Removal of the address space can mean there are no longer any 3107 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3108 // or required. 
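  // For example, stripping the address space from a type that is otherwise
  // qualified only with 'const' leaves just a fast qualifier, so a plain
  // QualType is returned below rather than a new ExtQuals node.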
3109 if (Quals.hasNonFastQualifiers()) 3110 return getExtQualType(TypeNode, Quals); 3111 else 3112 return QualType(TypeNode, Quals.getFastQualifiers()); 3113 } 3114 3115 QualType ASTContext::getObjCGCQualType(QualType T, 3116 Qualifiers::GC GCAttr) const { 3117 QualType CanT = getCanonicalType(T); 3118 if (CanT.getObjCGCAttr() == GCAttr) 3119 return T; 3120 3121 if (const auto *ptr = T->getAs<PointerType>()) { 3122 QualType Pointee = ptr->getPointeeType(); 3123 if (Pointee->isAnyPointerType()) { 3124 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3125 return getPointerType(ResultType); 3126 } 3127 } 3128 3129 // If we are composing extended qualifiers together, merge together 3130 // into one ExtQuals node. 3131 QualifierCollector Quals; 3132 const Type *TypeNode = Quals.strip(T); 3133 3134 // If this type already has an ObjCGC specified, it cannot get 3135 // another one. 3136 assert(!Quals.hasObjCGCAttr() && 3137 "Type cannot have multiple ObjCGCs!"); 3138 Quals.addObjCGCAttr(GCAttr); 3139 3140 return getExtQualType(TypeNode, Quals); 3141 } 3142 3143 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3144 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3145 QualType Pointee = Ptr->getPointeeType(); 3146 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3147 return getPointerType(removeAddrSpaceQualType(Pointee)); 3148 } 3149 } 3150 return T; 3151 } 3152 3153 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3154 FunctionType::ExtInfo Info) { 3155 if (T->getExtInfo() == Info) 3156 return T; 3157 3158 QualType Result; 3159 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3160 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3161 } else { 3162 const auto *FPT = cast<FunctionProtoType>(T); 3163 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3164 EPI.ExtInfo = Info; 3165 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3166 } 3167 3168 return cast<FunctionType>(Result.getTypePtr()); 3169 } 3170 3171 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3172 QualType ResultType) { 3173 FD = FD->getMostRecentDecl(); 3174 while (true) { 3175 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3176 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3177 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3178 if (FunctionDecl *Next = FD->getPreviousDecl()) 3179 FD = Next; 3180 else 3181 break; 3182 } 3183 if (ASTMutationListener *L = getASTMutationListener()) 3184 L->DeducedReturnType(FD, ResultType); 3185 } 3186 3187 /// Get a function type and produce the equivalent function type with the 3188 /// specified exception specification. Type sugar that can be present on a 3189 /// declaration of a function with an exception specification is permitted 3190 /// and preserved. Other type sugar (for instance, typedefs) is not. 3191 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3192 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3193 // Might have some parens. 3194 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3195 return getParenType( 3196 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3197 3198 // Might be wrapped in a macro qualified type. 
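  // (That is, an attribute that was spelled through a macro: the
  // MacroQualifiedType remembers the macro's identifier, which is re-applied
  // around the adjusted function type below.)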
3199 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3200 return getMacroQualifiedType( 3201 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3202 MQT->getMacroIdentifier()); 3203 3204 // Might have a calling-convention attribute. 3205 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3206 return getAttributedType( 3207 AT->getAttrKind(), 3208 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3209 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3210 3211 // Anything else must be a function type. Rebuild it with the new exception 3212 // specification. 3213 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3214 return getFunctionType( 3215 Proto->getReturnType(), Proto->getParamTypes(), 3216 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3217 } 3218 3219 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3220 QualType U) const { 3221 return hasSameType(T, U) || 3222 (getLangOpts().CPlusPlus17 && 3223 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3224 getFunctionTypeWithExceptionSpec(U, EST_None))); 3225 } 3226 3227 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3228 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3229 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3230 SmallVector<QualType, 16> Args(Proto->param_types().size()); 3231 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3232 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]); 3233 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3234 } 3235 3236 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3237 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3238 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3239 } 3240 3241 return T; 3242 } 3243 3244 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3245 return hasSameType(T, U) || 3246 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3247 getFunctionTypeWithoutPtrSizes(U)); 3248 } 3249 3250 void ASTContext::adjustExceptionSpec( 3251 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3252 bool AsWritten) { 3253 // Update the type. 3254 QualType Updated = 3255 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3256 FD->setType(Updated); 3257 3258 if (!AsWritten) 3259 return; 3260 3261 // Update the type in the type source information too. 3262 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3263 // If the type and the type-as-written differ, we may need to update 3264 // the type-as-written too. 3265 if (TSInfo->getType() != FD->getType()) 3266 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3267 3268 // FIXME: When we get proper type location information for exceptions, 3269 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3270 // up the TypeSourceInfo; 3271 assert(TypeLoc::getFullDataSizeForType(Updated) == 3272 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3273 "TypeLoc size mismatch from updating exception specification"); 3274 TSInfo->overrideType(Updated); 3275 } 3276 } 3277 3278 /// getComplexType - Return the uniqued reference to the type for a complex 3279 /// number with the specified element type. 3280 QualType ASTContext::getComplexType(QualType T) const { 3281 // Unique pointers, to guarantee there is only one pointer of a particular 3282 // structure. 
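  // The memoization idiom below recurs throughout this file: Profile() folds
  // the node's structural properties into an ID, FindNodeOrInsertPos() looks
  // for an existing node while remembering the insertion point, and
  // InsertNode() registers a newly allocated node at that point when no match
  // was found.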
3283 llvm::FoldingSetNodeID ID; 3284 ComplexType::Profile(ID, T); 3285 3286 void *InsertPos = nullptr; 3287 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3288 return QualType(CT, 0); 3289 3290 // If the pointee type isn't canonical, this won't be a canonical type either, 3291 // so fill in the canonical type field. 3292 QualType Canonical; 3293 if (!T.isCanonical()) { 3294 Canonical = getComplexType(getCanonicalType(T)); 3295 3296 // Get the new insert position for the node we care about. 3297 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3298 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3299 } 3300 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3301 Types.push_back(New); 3302 ComplexTypes.InsertNode(New, InsertPos); 3303 return QualType(New, 0); 3304 } 3305 3306 /// getPointerType - Return the uniqued reference to the type for a pointer to 3307 /// the specified type. 3308 QualType ASTContext::getPointerType(QualType T) const { 3309 // Unique pointers, to guarantee there is only one pointer of a particular 3310 // structure. 3311 llvm::FoldingSetNodeID ID; 3312 PointerType::Profile(ID, T); 3313 3314 void *InsertPos = nullptr; 3315 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3316 return QualType(PT, 0); 3317 3318 // If the pointee type isn't canonical, this won't be a canonical type either, 3319 // so fill in the canonical type field. 3320 QualType Canonical; 3321 if (!T.isCanonical()) { 3322 Canonical = getPointerType(getCanonicalType(T)); 3323 3324 // Get the new insert position for the node we care about. 3325 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3326 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3327 } 3328 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3329 Types.push_back(New); 3330 PointerTypes.InsertNode(New, InsertPos); 3331 return QualType(New, 0); 3332 } 3333 3334 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3335 llvm::FoldingSetNodeID ID; 3336 AdjustedType::Profile(ID, Orig, New); 3337 void *InsertPos = nullptr; 3338 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3339 if (AT) 3340 return QualType(AT, 0); 3341 3342 QualType Canonical = getCanonicalType(New); 3343 3344 // Get the new insert position for the node we care about. 3345 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3346 assert(!AT && "Shouldn't be in the map!"); 3347 3348 AT = new (*this, TypeAlignment) 3349 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3350 Types.push_back(AT); 3351 AdjustedTypes.InsertNode(AT, InsertPos); 3352 return QualType(AT, 0); 3353 } 3354 3355 QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const { 3356 llvm::FoldingSetNodeID ID; 3357 AdjustedType::Profile(ID, Orig, Decayed); 3358 void *InsertPos = nullptr; 3359 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3360 if (AT) 3361 return QualType(AT, 0); 3362 3363 QualType Canonical = getCanonicalType(Decayed); 3364 3365 // Get the new insert position for the node we care about. 
3366 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3367 assert(!AT && "Shouldn't be in the map!"); 3368 3369 AT = new (*this, TypeAlignment) DecayedType(Orig, Decayed, Canonical); 3370 Types.push_back(AT); 3371 AdjustedTypes.InsertNode(AT, InsertPos); 3372 return QualType(AT, 0); 3373 } 3374 3375 QualType ASTContext::getDecayedType(QualType T) const { 3376 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3377 3378 QualType Decayed; 3379 3380 // C99 6.7.5.3p7: 3381 // A declaration of a parameter as "array of type" shall be 3382 // adjusted to "qualified pointer to type", where the type 3383 // qualifiers (if any) are those specified within the [ and ] of 3384 // the array type derivation. 3385 if (T->isArrayType()) 3386 Decayed = getArrayDecayedType(T); 3387 3388 // C99 6.7.5.3p8: 3389 // A declaration of a parameter as "function returning type" 3390 // shall be adjusted to "pointer to function returning type", as 3391 // in 6.3.2.1. 3392 if (T->isFunctionType()) 3393 Decayed = getPointerType(T); 3394 3395 return getDecayedType(T, Decayed); 3396 } 3397 3398 /// getBlockPointerType - Return the uniqued reference to the type for 3399 /// a pointer to the specified block. 3400 QualType ASTContext::getBlockPointerType(QualType T) const { 3401 assert(T->isFunctionType() && "block of function types only"); 3402 // Unique pointers, to guarantee there is only one block of a particular 3403 // structure. 3404 llvm::FoldingSetNodeID ID; 3405 BlockPointerType::Profile(ID, T); 3406 3407 void *InsertPos = nullptr; 3408 if (BlockPointerType *PT = 3409 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3410 return QualType(PT, 0); 3411 3412 // If the block pointee type isn't canonical, this won't be a canonical 3413 // type either so fill in the canonical type field. 3414 QualType Canonical; 3415 if (!T.isCanonical()) { 3416 Canonical = getBlockPointerType(getCanonicalType(T)); 3417 3418 // Get the new insert position for the node we care about. 3419 BlockPointerType *NewIP = 3420 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3421 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3422 } 3423 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3424 Types.push_back(New); 3425 BlockPointerTypes.InsertNode(New, InsertPos); 3426 return QualType(New, 0); 3427 } 3428 3429 /// getLValueReferenceType - Return the uniqued reference to the type for an 3430 /// lvalue reference to the specified type. 3431 QualType 3432 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3433 assert((!T->isPlaceholderType() || 3434 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3435 "Unresolved placeholder type"); 3436 3437 // Unique pointers, to guarantee there is only one pointer of a particular 3438 // structure. 3439 llvm::FoldingSetNodeID ID; 3440 ReferenceType::Profile(ID, T, SpelledAsLValue); 3441 3442 void *InsertPos = nullptr; 3443 if (LValueReferenceType *RT = 3444 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3445 return QualType(RT, 0); 3446 3447 const auto *InnerRef = T->getAs<ReferenceType>(); 3448 3449 // If the referencee type isn't canonical, this won't be a canonical type 3450 // either, so fill in the canonical type field. 3451 QualType Canonical; 3452 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3453 QualType PointeeType = (InnerRef ? 
InnerRef->getPointeeType() : T); 3454 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3455 3456 // Get the new insert position for the node we care about. 3457 LValueReferenceType *NewIP = 3458 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3459 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3460 } 3461 3462 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 3463 SpelledAsLValue); 3464 Types.push_back(New); 3465 LValueReferenceTypes.InsertNode(New, InsertPos); 3466 3467 return QualType(New, 0); 3468 } 3469 3470 /// getRValueReferenceType - Return the uniqued reference to the type for an 3471 /// rvalue reference to the specified type. 3472 QualType ASTContext::getRValueReferenceType(QualType T) const { 3473 assert((!T->isPlaceholderType() || 3474 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3475 "Unresolved placeholder type"); 3476 3477 // Unique pointers, to guarantee there is only one pointer of a particular 3478 // structure. 3479 llvm::FoldingSetNodeID ID; 3480 ReferenceType::Profile(ID, T, false); 3481 3482 void *InsertPos = nullptr; 3483 if (RValueReferenceType *RT = 3484 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3485 return QualType(RT, 0); 3486 3487 const auto *InnerRef = T->getAs<ReferenceType>(); 3488 3489 // If the referencee type isn't canonical, this won't be a canonical type 3490 // either, so fill in the canonical type field. 3491 QualType Canonical; 3492 if (InnerRef || !T.isCanonical()) { 3493 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3494 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3495 3496 // Get the new insert position for the node we care about. 3497 RValueReferenceType *NewIP = 3498 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3499 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3500 } 3501 3502 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 3503 Types.push_back(New); 3504 RValueReferenceTypes.InsertNode(New, InsertPos); 3505 return QualType(New, 0); 3506 } 3507 3508 /// getMemberPointerType - Return the uniqued reference to the type for a 3509 /// member pointer to the specified type, in the specified class. 3510 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3511 // Unique pointers, to guarantee there is only one pointer of a particular 3512 // structure. 3513 llvm::FoldingSetNodeID ID; 3514 MemberPointerType::Profile(ID, T, Cls); 3515 3516 void *InsertPos = nullptr; 3517 if (MemberPointerType *PT = 3518 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3519 return QualType(PT, 0); 3520 3521 // If the pointee or class type isn't canonical, this won't be a canonical 3522 // type either, so fill in the canonical type field. 3523 QualType Canonical; 3524 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3525 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3526 3527 // Get the new insert position for the node we care about. 3528 MemberPointerType *NewIP = 3529 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3530 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3531 } 3532 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 3533 Types.push_back(New); 3534 MemberPointerTypes.InsertNode(New, InsertPos); 3535 return QualType(New, 0); 3536 } 3537 3538 /// getConstantArrayType - Return the unique reference to the type for an 3539 /// array of the specified element type. 
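///
/// For example (an illustrative sketch, assuming an ASTContext &Ctx is in
/// scope), the type 'int[4]' can be requested as:
/// \code
///   llvm::APInt Size(/*numBits=*/64, /*val=*/4);
///   QualType IntArr4 = Ctx.getConstantArrayType(Ctx.IntTy, Size,
///                                               /*SizeExpr=*/nullptr,
///                                               ArrayType::Normal,
///                                               /*IndexTypeQuals=*/0);
/// \endcode
/// The size is zero-extended or truncated to the target's maximum pointer
/// width, and repeated requests yield the same uniqued node.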
3540 QualType ASTContext::getConstantArrayType(QualType EltTy, 3541 const llvm::APInt &ArySizeIn, 3542 const Expr *SizeExpr, 3543 ArrayType::ArraySizeModifier ASM, 3544 unsigned IndexTypeQuals) const { 3545 assert((EltTy->isDependentType() || 3546 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3547 "Constant array of VLAs is illegal!"); 3548 3549 // We only need the size as part of the type if it's instantiation-dependent. 3550 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3551 SizeExpr = nullptr; 3552 3553 // Convert the array size into a canonical width matching the pointer size for 3554 // the target. 3555 llvm::APInt ArySize(ArySizeIn); 3556 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3557 3558 llvm::FoldingSetNodeID ID; 3559 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3560 IndexTypeQuals); 3561 3562 void *InsertPos = nullptr; 3563 if (ConstantArrayType *ATP = 3564 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3565 return QualType(ATP, 0); 3566 3567 // If the element type isn't canonical or has qualifiers, or the array bound 3568 // is instantiation-dependent, this won't be a canonical type either, so fill 3569 // in the canonical type field. 3570 QualType Canon; 3571 // FIXME: Check below should look for qualifiers behind sugar. 3572 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3573 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3574 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3575 ASM, IndexTypeQuals); 3576 Canon = getQualifiedType(Canon, canonSplit.Quals); 3577 3578 // Get the new insert position for the node we care about. 3579 ConstantArrayType *NewIP = 3580 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3581 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3582 } 3583 3584 void *Mem = Allocate( 3585 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3586 TypeAlignment); 3587 auto *New = new (Mem) 3588 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3589 ConstantArrayTypes.InsertNode(New, InsertPos); 3590 Types.push_back(New); 3591 return QualType(New, 0); 3592 } 3593 3594 /// getVariableArrayDecayedType - Turns the given type, which may be 3595 /// variably-modified, into the corresponding type with all the known 3596 /// sizes replaced with [*]. 3597 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3598 // Vastly most common case. 3599 if (!type->isVariablyModifiedType()) return type; 3600 3601 QualType result; 3602 3603 SplitQualType split = type.getSplitDesugaredType(); 3604 const Type *ty = split.Ty; 3605 switch (ty->getTypeClass()) { 3606 #define TYPE(Class, Base) 3607 #define ABSTRACT_TYPE(Class, Base) 3608 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3609 #include "clang/AST/TypeNodes.inc" 3610 llvm_unreachable("didn't desugar past all non-canonical types?"); 3611 3612 // These types should never be variably-modified. 
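  // (A type is variably-modified when a variable length array appears
  // anywhere in its composition, per C99 6.7.5p3; none of the following can
  // contain one.)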
3613 case Type::Builtin: 3614 case Type::Complex: 3615 case Type::Vector: 3616 case Type::DependentVector: 3617 case Type::ExtVector: 3618 case Type::DependentSizedExtVector: 3619 case Type::ConstantMatrix: 3620 case Type::DependentSizedMatrix: 3621 case Type::DependentAddressSpace: 3622 case Type::ObjCObject: 3623 case Type::ObjCInterface: 3624 case Type::ObjCObjectPointer: 3625 case Type::Record: 3626 case Type::Enum: 3627 case Type::UnresolvedUsing: 3628 case Type::TypeOfExpr: 3629 case Type::TypeOf: 3630 case Type::Decltype: 3631 case Type::UnaryTransform: 3632 case Type::DependentName: 3633 case Type::InjectedClassName: 3634 case Type::TemplateSpecialization: 3635 case Type::DependentTemplateSpecialization: 3636 case Type::TemplateTypeParm: 3637 case Type::SubstTemplateTypeParmPack: 3638 case Type::Auto: 3639 case Type::DeducedTemplateSpecialization: 3640 case Type::PackExpansion: 3641 case Type::BitInt: 3642 case Type::DependentBitInt: 3643 llvm_unreachable("type should never be variably-modified"); 3644 3645 // These types can be variably-modified but should never need to 3646 // further decay. 3647 case Type::FunctionNoProto: 3648 case Type::FunctionProto: 3649 case Type::BlockPointer: 3650 case Type::MemberPointer: 3651 case Type::Pipe: 3652 return type; 3653 3654 // These types can be variably-modified. All these modifications 3655 // preserve structure except as noted by comments. 3656 // TODO: if we ever care about optimizing VLAs, there are no-op 3657 // optimizations available here. 3658 case Type::Pointer: 3659 result = getPointerType(getVariableArrayDecayedType( 3660 cast<PointerType>(ty)->getPointeeType())); 3661 break; 3662 3663 case Type::LValueReference: { 3664 const auto *lv = cast<LValueReferenceType>(ty); 3665 result = getLValueReferenceType( 3666 getVariableArrayDecayedType(lv->getPointeeType()), 3667 lv->isSpelledAsLValue()); 3668 break; 3669 } 3670 3671 case Type::RValueReference: { 3672 const auto *lv = cast<RValueReferenceType>(ty); 3673 result = getRValueReferenceType( 3674 getVariableArrayDecayedType(lv->getPointeeType())); 3675 break; 3676 } 3677 3678 case Type::Atomic: { 3679 const auto *at = cast<AtomicType>(ty); 3680 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3681 break; 3682 } 3683 3684 case Type::ConstantArray: { 3685 const auto *cat = cast<ConstantArrayType>(ty); 3686 result = getConstantArrayType( 3687 getVariableArrayDecayedType(cat->getElementType()), 3688 cat->getSize(), 3689 cat->getSizeExpr(), 3690 cat->getSizeModifier(), 3691 cat->getIndexTypeCVRQualifiers()); 3692 break; 3693 } 3694 3695 case Type::DependentSizedArray: { 3696 const auto *dat = cast<DependentSizedArrayType>(ty); 3697 result = getDependentSizedArrayType( 3698 getVariableArrayDecayedType(dat->getElementType()), 3699 dat->getSizeExpr(), 3700 dat->getSizeModifier(), 3701 dat->getIndexTypeCVRQualifiers(), 3702 dat->getBracketsRange()); 3703 break; 3704 } 3705 3706 // Turn incomplete types into [*] types. 3707 case Type::IncompleteArray: { 3708 const auto *iat = cast<IncompleteArrayType>(ty); 3709 result = getVariableArrayType( 3710 getVariableArrayDecayedType(iat->getElementType()), 3711 /*size*/ nullptr, 3712 ArrayType::Normal, 3713 iat->getIndexTypeCVRQualifiers(), 3714 SourceRange()); 3715 break; 3716 } 3717 3718 // Turn VLA types into [*] types. 
3719 case Type::VariableArray: { 3720 const auto *vat = cast<VariableArrayType>(ty); 3721 result = getVariableArrayType( 3722 getVariableArrayDecayedType(vat->getElementType()), 3723 /*size*/ nullptr, 3724 ArrayType::Star, 3725 vat->getIndexTypeCVRQualifiers(), 3726 vat->getBracketsRange()); 3727 break; 3728 } 3729 } 3730 3731 // Apply the top-level qualifiers from the original. 3732 return getQualifiedType(result, split.Quals); 3733 } 3734 3735 /// getVariableArrayType - Returns a non-unique reference to the type for a 3736 /// variable array of the specified element type. 3737 QualType ASTContext::getVariableArrayType(QualType EltTy, 3738 Expr *NumElts, 3739 ArrayType::ArraySizeModifier ASM, 3740 unsigned IndexTypeQuals, 3741 SourceRange Brackets) const { 3742 // Since we don't unique expressions, it isn't possible to unique VLA's 3743 // that have an expression provided for their size. 3744 QualType Canon; 3745 3746 // Be sure to pull qualifiers off the element type. 3747 // FIXME: Check below should look for qualifiers behind sugar. 3748 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3749 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3750 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3751 IndexTypeQuals, Brackets); 3752 Canon = getQualifiedType(Canon, canonSplit.Quals); 3753 } 3754 3755 auto *New = new (*this, TypeAlignment) 3756 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3757 3758 VariableArrayTypes.push_back(New); 3759 Types.push_back(New); 3760 return QualType(New, 0); 3761 } 3762 3763 /// getDependentSizedArrayType - Returns a non-unique reference to 3764 /// the type for a dependently-sized array of the specified element 3765 /// type. 3766 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3767 Expr *numElements, 3768 ArrayType::ArraySizeModifier ASM, 3769 unsigned elementTypeQuals, 3770 SourceRange brackets) const { 3771 assert((!numElements || numElements->isTypeDependent() || 3772 numElements->isValueDependent()) && 3773 "Size must be type- or value-dependent!"); 3774 3775 // Dependently-sized array types that do not have a specified number 3776 // of elements will have their sizes deduced from a dependent 3777 // initializer. We do no canonicalization here at all, which is okay 3778 // because they can't be used in most locations. 3779 if (!numElements) { 3780 auto *newType 3781 = new (*this, TypeAlignment) 3782 DependentSizedArrayType(*this, elementType, QualType(), 3783 numElements, ASM, elementTypeQuals, 3784 brackets); 3785 Types.push_back(newType); 3786 return QualType(newType, 0); 3787 } 3788 3789 // Otherwise, we actually build a new type every time, but we 3790 // also build a canonical type. 3791 3792 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3793 3794 void *insertPos = nullptr; 3795 llvm::FoldingSetNodeID ID; 3796 DependentSizedArrayType::Profile(ID, *this, 3797 QualType(canonElementType.Ty, 0), 3798 ASM, elementTypeQuals, numElements); 3799 3800 // Look for an existing type with these properties. 3801 DependentSizedArrayType *canonTy = 3802 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3803 3804 // If we don't have one, build one. 
3805 if (!canonTy) { 3806 canonTy = new (*this, TypeAlignment) 3807 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 3808 QualType(), numElements, ASM, elementTypeQuals, 3809 brackets); 3810 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3811 Types.push_back(canonTy); 3812 } 3813 3814 // Apply qualifiers from the element type to the array. 3815 QualType canon = getQualifiedType(QualType(canonTy,0), 3816 canonElementType.Quals); 3817 3818 // If we didn't need extra canonicalization for the element type or the size 3819 // expression, then just use that as our result. 3820 if (QualType(canonElementType.Ty, 0) == elementType && 3821 canonTy->getSizeExpr() == numElements) 3822 return canon; 3823 3824 // Otherwise, we need to build a type which follows the spelling 3825 // of the element type. 3826 auto *sugaredType 3827 = new (*this, TypeAlignment) 3828 DependentSizedArrayType(*this, elementType, canon, numElements, 3829 ASM, elementTypeQuals, brackets); 3830 Types.push_back(sugaredType); 3831 return QualType(sugaredType, 0); 3832 } 3833 3834 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3835 ArrayType::ArraySizeModifier ASM, 3836 unsigned elementTypeQuals) const { 3837 llvm::FoldingSetNodeID ID; 3838 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3839 3840 void *insertPos = nullptr; 3841 if (IncompleteArrayType *iat = 3842 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3843 return QualType(iat, 0); 3844 3845 // If the element type isn't canonical, this won't be a canonical type 3846 // either, so fill in the canonical type field. We also have to pull 3847 // qualifiers off the element type. 3848 QualType canon; 3849 3850 // FIXME: Check below should look for qualifiers behind sugar. 3851 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3852 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3853 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3854 ASM, elementTypeQuals); 3855 canon = getQualifiedType(canon, canonSplit.Quals); 3856 3857 // Get the new insert position for the node we care about. 
3858 IncompleteArrayType *existing = 3859 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3860 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3861 } 3862 3863 auto *newType = new (*this, TypeAlignment) 3864 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3865 3866 IncompleteArrayTypes.InsertNode(newType, insertPos); 3867 Types.push_back(newType); 3868 return QualType(newType, 0); 3869 } 3870 3871 ASTContext::BuiltinVectorTypeInfo 3872 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3873 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3874 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3875 NUMVECTORS}; 3876 3877 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3878 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3879 3880 switch (Ty->getKind()) { 3881 default: 3882 llvm_unreachable("Unsupported builtin vector type"); 3883 case BuiltinType::SveInt8: 3884 return SVE_INT_ELTTY(8, 16, true, 1); 3885 case BuiltinType::SveUint8: 3886 return SVE_INT_ELTTY(8, 16, false, 1); 3887 case BuiltinType::SveInt8x2: 3888 return SVE_INT_ELTTY(8, 16, true, 2); 3889 case BuiltinType::SveUint8x2: 3890 return SVE_INT_ELTTY(8, 16, false, 2); 3891 case BuiltinType::SveInt8x3: 3892 return SVE_INT_ELTTY(8, 16, true, 3); 3893 case BuiltinType::SveUint8x3: 3894 return SVE_INT_ELTTY(8, 16, false, 3); 3895 case BuiltinType::SveInt8x4: 3896 return SVE_INT_ELTTY(8, 16, true, 4); 3897 case BuiltinType::SveUint8x4: 3898 return SVE_INT_ELTTY(8, 16, false, 4); 3899 case BuiltinType::SveInt16: 3900 return SVE_INT_ELTTY(16, 8, true, 1); 3901 case BuiltinType::SveUint16: 3902 return SVE_INT_ELTTY(16, 8, false, 1); 3903 case BuiltinType::SveInt16x2: 3904 return SVE_INT_ELTTY(16, 8, true, 2); 3905 case BuiltinType::SveUint16x2: 3906 return SVE_INT_ELTTY(16, 8, false, 2); 3907 case BuiltinType::SveInt16x3: 3908 return SVE_INT_ELTTY(16, 8, true, 3); 3909 case BuiltinType::SveUint16x3: 3910 return SVE_INT_ELTTY(16, 8, false, 3); 3911 case BuiltinType::SveInt16x4: 3912 return SVE_INT_ELTTY(16, 8, true, 4); 3913 case BuiltinType::SveUint16x4: 3914 return SVE_INT_ELTTY(16, 8, false, 4); 3915 case BuiltinType::SveInt32: 3916 return SVE_INT_ELTTY(32, 4, true, 1); 3917 case BuiltinType::SveUint32: 3918 return SVE_INT_ELTTY(32, 4, false, 1); 3919 case BuiltinType::SveInt32x2: 3920 return SVE_INT_ELTTY(32, 4, true, 2); 3921 case BuiltinType::SveUint32x2: 3922 return SVE_INT_ELTTY(32, 4, false, 2); 3923 case BuiltinType::SveInt32x3: 3924 return SVE_INT_ELTTY(32, 4, true, 3); 3925 case BuiltinType::SveUint32x3: 3926 return SVE_INT_ELTTY(32, 4, false, 3); 3927 case BuiltinType::SveInt32x4: 3928 return SVE_INT_ELTTY(32, 4, true, 4); 3929 case BuiltinType::SveUint32x4: 3930 return SVE_INT_ELTTY(32, 4, false, 4); 3931 case BuiltinType::SveInt64: 3932 return SVE_INT_ELTTY(64, 2, true, 1); 3933 case BuiltinType::SveUint64: 3934 return SVE_INT_ELTTY(64, 2, false, 1); 3935 case BuiltinType::SveInt64x2: 3936 return SVE_INT_ELTTY(64, 2, true, 2); 3937 case BuiltinType::SveUint64x2: 3938 return SVE_INT_ELTTY(64, 2, false, 2); 3939 case BuiltinType::SveInt64x3: 3940 return SVE_INT_ELTTY(64, 2, true, 3); 3941 case BuiltinType::SveUint64x3: 3942 return SVE_INT_ELTTY(64, 2, false, 3); 3943 case BuiltinType::SveInt64x4: 3944 return SVE_INT_ELTTY(64, 2, true, 4); 3945 case BuiltinType::SveUint64x4: 3946 return SVE_INT_ELTTY(64, 2, false, 4); 3947 case BuiltinType::SveBool: 3948 return SVE_ELTTY(BoolTy, 16, 1); 3949 case BuiltinType::SveFloat16: 3950 
return SVE_ELTTY(HalfTy, 8, 1); 3951 case BuiltinType::SveFloat16x2: 3952 return SVE_ELTTY(HalfTy, 8, 2); 3953 case BuiltinType::SveFloat16x3: 3954 return SVE_ELTTY(HalfTy, 8, 3); 3955 case BuiltinType::SveFloat16x4: 3956 return SVE_ELTTY(HalfTy, 8, 4); 3957 case BuiltinType::SveFloat32: 3958 return SVE_ELTTY(FloatTy, 4, 1); 3959 case BuiltinType::SveFloat32x2: 3960 return SVE_ELTTY(FloatTy, 4, 2); 3961 case BuiltinType::SveFloat32x3: 3962 return SVE_ELTTY(FloatTy, 4, 3); 3963 case BuiltinType::SveFloat32x4: 3964 return SVE_ELTTY(FloatTy, 4, 4); 3965 case BuiltinType::SveFloat64: 3966 return SVE_ELTTY(DoubleTy, 2, 1); 3967 case BuiltinType::SveFloat64x2: 3968 return SVE_ELTTY(DoubleTy, 2, 2); 3969 case BuiltinType::SveFloat64x3: 3970 return SVE_ELTTY(DoubleTy, 2, 3); 3971 case BuiltinType::SveFloat64x4: 3972 return SVE_ELTTY(DoubleTy, 2, 4); 3973 case BuiltinType::SveBFloat16: 3974 return SVE_ELTTY(BFloat16Ty, 8, 1); 3975 case BuiltinType::SveBFloat16x2: 3976 return SVE_ELTTY(BFloat16Ty, 8, 2); 3977 case BuiltinType::SveBFloat16x3: 3978 return SVE_ELTTY(BFloat16Ty, 8, 3); 3979 case BuiltinType::SveBFloat16x4: 3980 return SVE_ELTTY(BFloat16Ty, 8, 4); 3981 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3982 IsSigned) \ 3983 case BuiltinType::Id: \ 3984 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3985 llvm::ElementCount::getScalable(NumEls), NF}; 3986 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3987 case BuiltinType::Id: \ 3988 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3989 llvm::ElementCount::getScalable(NumEls), NF}; 3990 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3991 case BuiltinType::Id: \ 3992 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3993 #include "clang/Basic/RISCVVTypes.def" 3994 } 3995 } 3996 3997 /// getScalableVectorType - Return the unique reference to a scalable vector 3998 /// type of the specified element type and size. VectorType must be a built-in 3999 /// type. 
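///
/// For example (illustrative, assuming a target providing the AArch64 SVE
/// types and an ASTContext &Ctx in scope), Ctx.getScalableVectorType(
/// Ctx.FloatTy, 4) yields the predefined 'svfloat32_t' singleton; if no
/// builtin matches the element type and count, a null QualType is returned.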
4000 QualType ASTContext::getScalableVectorType(QualType EltTy, 4001 unsigned NumElts) const { 4002 if (Target->hasAArch64SVETypes()) { 4003 uint64_t EltTySize = getTypeSize(EltTy); 4004 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 4005 IsSigned, IsFP, IsBF) \ 4006 if (!EltTy->isBooleanType() && \ 4007 ((EltTy->hasIntegerRepresentation() && \ 4008 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 4009 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 4010 IsFP && !IsBF) || \ 4011 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 4012 IsBF && !IsFP)) && \ 4013 EltTySize == ElBits && NumElts == NumEls) { \ 4014 return SingletonId; \ 4015 } 4016 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 4017 if (EltTy->isBooleanType() && NumElts == NumEls) \ 4018 return SingletonId; 4019 #include "clang/Basic/AArch64SVEACLETypes.def" 4020 } else if (Target->hasRISCVVTypes()) { 4021 uint64_t EltTySize = getTypeSize(EltTy); 4022 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 4023 IsFP) \ 4024 if (!EltTy->isBooleanType() && \ 4025 ((EltTy->hasIntegerRepresentation() && \ 4026 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 4027 (EltTy->hasFloatingRepresentation() && IsFP)) && \ 4028 EltTySize == ElBits && NumElts == NumEls) \ 4029 return SingletonId; 4030 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 4031 if (EltTy->isBooleanType() && NumElts == NumEls) \ 4032 return SingletonId; 4033 #include "clang/Basic/RISCVVTypes.def" 4034 } 4035 return QualType(); 4036 } 4037 4038 /// getVectorType - Return the unique reference to a vector type of 4039 /// the specified element type and size. VectorType must be a built-in type. 4040 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4041 VectorType::VectorKind VecKind) const { 4042 assert(vecType->isBuiltinType() || 4043 (vecType->isBitIntType() && 4044 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4045 llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) && 4046 vecType->getAs<BitIntType>()->getNumBits() >= 8)); 4047 4048 // Check if we've already instantiated a vector of this type. 4049 llvm::FoldingSetNodeID ID; 4050 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4051 4052 void *InsertPos = nullptr; 4053 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4054 return QualType(VTP, 0); 4055 4056 // If the element type isn't canonical, this won't be a canonical type either, 4057 // so fill in the canonical type field. 4058 QualType Canonical; 4059 if (!vecType.isCanonical()) { 4060 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4061 4062 // Get the new insert position for the node we care about. 
4063 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4064 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4065 } 4066 auto *New = new (*this, TypeAlignment) 4067 VectorType(vecType, NumElts, Canonical, VecKind); 4068 VectorTypes.InsertNode(New, InsertPos); 4069 Types.push_back(New); 4070 return QualType(New, 0); 4071 } 4072 4073 QualType 4074 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4075 SourceLocation AttrLoc, 4076 VectorType::VectorKind VecKind) const { 4077 llvm::FoldingSetNodeID ID; 4078 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4079 VecKind); 4080 void *InsertPos = nullptr; 4081 DependentVectorType *Canon = 4082 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4083 DependentVectorType *New; 4084 4085 if (Canon) { 4086 New = new (*this, TypeAlignment) DependentVectorType( 4087 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4088 } else { 4089 QualType CanonVecTy = getCanonicalType(VecType); 4090 if (CanonVecTy == VecType) { 4091 New = new (*this, TypeAlignment) DependentVectorType( 4092 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4093 4094 DependentVectorType *CanonCheck = 4095 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4096 assert(!CanonCheck && 4097 "Dependent-sized vector_size canonical type broken"); 4098 (void)CanonCheck; 4099 DependentVectorTypes.InsertNode(New, InsertPos); 4100 } else { 4101 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4102 SourceLocation(), VecKind); 4103 New = new (*this, TypeAlignment) DependentVectorType( 4104 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4105 } 4106 } 4107 4108 Types.push_back(New); 4109 return QualType(New, 0); 4110 } 4111 4112 /// getExtVectorType - Return the unique reference to an extended vector type of 4113 /// the specified element type and size. VectorType must be a built-in type. 4114 QualType ASTContext::getExtVectorType(QualType vecType, 4115 unsigned NumElts) const { 4116 assert(vecType->isBuiltinType() || vecType->isDependentType() || 4117 (vecType->isBitIntType() && 4118 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4119 llvm::isPowerOf2_32(vecType->getAs<BitIntType>()->getNumBits()) && 4120 vecType->getAs<BitIntType>()->getNumBits() >= 8)); 4121 4122 // Check if we've already instantiated a vector of this type. 4123 llvm::FoldingSetNodeID ID; 4124 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4125 VectorType::GenericVector); 4126 void *InsertPos = nullptr; 4127 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4128 return QualType(VTP, 0); 4129 4130 // If the element type isn't canonical, this won't be a canonical type either, 4131 // so fill in the canonical type field. 4132 QualType Canonical; 4133 if (!vecType.isCanonical()) { 4134 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4135 4136 // Get the new insert position for the node we care about. 
4137 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4138 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4139 } 4140 auto *New = new (*this, TypeAlignment) 4141 ExtVectorType(vecType, NumElts, Canonical); 4142 VectorTypes.InsertNode(New, InsertPos); 4143 Types.push_back(New); 4144 return QualType(New, 0); 4145 } 4146 4147 QualType 4148 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4149 Expr *SizeExpr, 4150 SourceLocation AttrLoc) const { 4151 llvm::FoldingSetNodeID ID; 4152 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4153 SizeExpr); 4154 4155 void *InsertPos = nullptr; 4156 DependentSizedExtVectorType *Canon 4157 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4158 DependentSizedExtVectorType *New; 4159 if (Canon) { 4160 // We already have a canonical version of this array type; use it as 4161 // the canonical type for a newly-built type. 4162 New = new (*this, TypeAlignment) 4163 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 4164 SizeExpr, AttrLoc); 4165 } else { 4166 QualType CanonVecTy = getCanonicalType(vecType); 4167 if (CanonVecTy == vecType) { 4168 New = new (*this, TypeAlignment) 4169 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 4170 AttrLoc); 4171 4172 DependentSizedExtVectorType *CanonCheck 4173 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4174 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4175 (void)CanonCheck; 4176 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4177 } else { 4178 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4179 SourceLocation()); 4180 New = new (*this, TypeAlignment) DependentSizedExtVectorType( 4181 *this, vecType, CanonExtTy, SizeExpr, AttrLoc); 4182 } 4183 } 4184 4185 Types.push_back(New); 4186 return QualType(New, 0); 4187 } 4188 4189 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4190 unsigned NumColumns) const { 4191 llvm::FoldingSetNodeID ID; 4192 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4193 Type::ConstantMatrix); 4194 4195 assert(MatrixType::isValidElementType(ElementTy) && 4196 "need a valid element type"); 4197 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4198 ConstantMatrixType::isDimensionValid(NumColumns) && 4199 "need valid matrix dimensions"); 4200 void *InsertPos = nullptr; 4201 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4202 return QualType(MTP, 0); 4203 4204 QualType Canonical; 4205 if (!ElementTy.isCanonical()) { 4206 Canonical = 4207 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4208 4209 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4210 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4211 (void)NewIP; 4212 } 4213 4214 auto *New = new (*this, TypeAlignment) 4215 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4216 MatrixTypes.InsertNode(New, InsertPos); 4217 Types.push_back(New); 4218 return QualType(New, 0); 4219 } 4220 4221 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4222 Expr *RowExpr, 4223 Expr *ColumnExpr, 4224 SourceLocation AttrLoc) const { 4225 QualType CanonElementTy = getCanonicalType(ElementTy); 4226 llvm::FoldingSetNodeID ID; 4227 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4228 ColumnExpr); 4229 4230 void *InsertPos = nullptr; 4231 DependentSizedMatrixType 
*Canon = 4232 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4233 4234 if (!Canon) { 4235 Canon = new (*this, TypeAlignment) DependentSizedMatrixType( 4236 *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc); 4237 #ifndef NDEBUG 4238 DependentSizedMatrixType *CanonCheck = 4239 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4240 assert(!CanonCheck && "Dependent-sized matrix canonical type broken"); 4241 #endif 4242 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos); 4243 Types.push_back(Canon); 4244 } 4245 4246 // Already have a canonical version of the matrix type 4247 // 4248 // If it exactly matches the requested type, use it directly. 4249 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr && 4250 Canon->getColumnExpr() == ColumnExpr) 4251 return QualType(Canon, 0); 4252 4253 // Use Canon as the canonical type for newly-built type. 4254 DependentSizedMatrixType *New = new (*this, TypeAlignment) 4255 DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr, 4256 ColumnExpr, AttrLoc); 4257 Types.push_back(New); 4258 return QualType(New, 0); 4259 } 4260 4261 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, 4262 Expr *AddrSpaceExpr, 4263 SourceLocation AttrLoc) const { 4264 assert(AddrSpaceExpr->isInstantiationDependent()); 4265 4266 QualType canonPointeeType = getCanonicalType(PointeeType); 4267 4268 void *insertPos = nullptr; 4269 llvm::FoldingSetNodeID ID; 4270 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, 4271 AddrSpaceExpr); 4272 4273 DependentAddressSpaceType *canonTy = 4274 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); 4275 4276 if (!canonTy) { 4277 canonTy = new (*this, TypeAlignment) 4278 DependentAddressSpaceType(*this, canonPointeeType, 4279 QualType(), AddrSpaceExpr, AttrLoc); 4280 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); 4281 Types.push_back(canonTy); 4282 } 4283 4284 if (canonPointeeType == PointeeType && 4285 canonTy->getAddrSpaceExpr() == AddrSpaceExpr) 4286 return QualType(canonTy, 0); 4287 4288 auto *sugaredType 4289 = new (*this, TypeAlignment) 4290 DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0), 4291 AddrSpaceExpr, AttrLoc); 4292 Types.push_back(sugaredType); 4293 return QualType(sugaredType, 0); 4294 } 4295 4296 /// Determine whether \p T is canonical as the result type of a function. 4297 static bool isCanonicalResultType(QualType T) { 4298 return T.isCanonical() && 4299 (T.getObjCLifetime() == Qualifiers::OCL_None || 4300 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); 4301 } 4302 4303 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. 4304 QualType 4305 ASTContext::getFunctionNoProtoType(QualType ResultTy, 4306 const FunctionType::ExtInfo &Info) const { 4307 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter 4308 // functionality creates a function without a prototype regardless of 4309 // language mode (so it makes them even in C++). Once the rewriter has been 4310 // fixed, this assertion can be enabled again. 4311 //assert(!LangOpts.requiresStrictPrototypes() && 4312 // "strict prototypes are disabled"); 4313 4314 // Unique functions, to guarantee there is only one function of a particular 4315 structure.
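  // (Illustrative example: in C, a declaration such as 'int f();' with an
  // empty, unprototyped parameter list is represented by a
  // FunctionNoProtoType.)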
4316 llvm::FoldingSetNodeID ID; 4317 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4318 4319 void *InsertPos = nullptr; 4320 if (FunctionNoProtoType *FT = 4321 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4322 return QualType(FT, 0); 4323 4324 QualType Canonical; 4325 if (!isCanonicalResultType(ResultTy)) { 4326 Canonical = 4327 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4328 4329 // Get the new insert position for the node we care about. 4330 FunctionNoProtoType *NewIP = 4331 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4332 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4333 } 4334 4335 auto *New = new (*this, TypeAlignment) 4336 FunctionNoProtoType(ResultTy, Canonical, Info); 4337 Types.push_back(New); 4338 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4339 return QualType(New, 0); 4340 } 4341 4342 CanQualType 4343 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4344 CanQualType CanResultType = getCanonicalType(ResultType); 4345 4346 // Canonical result types do not have ARC lifetime qualifiers. 4347 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4348 Qualifiers Qs = CanResultType.getQualifiers(); 4349 Qs.removeObjCLifetime(); 4350 return CanQualType::CreateUnsafe( 4351 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4352 } 4353 4354 return CanResultType; 4355 } 4356 4357 static bool isCanonicalExceptionSpecification( 4358 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4359 if (ESI.Type == EST_None) 4360 return true; 4361 if (!NoexceptInType) 4362 return false; 4363 4364 // C++17 onwards: exception specification is part of the type, as a simple 4365 // boolean "can this function type throw". 4366 if (ESI.Type == EST_BasicNoexcept) 4367 return true; 4368 4369 // A noexcept(expr) specification is (possibly) canonical if expr is 4370 // value-dependent. 4371 if (ESI.Type == EST_DependentNoexcept) 4372 return true; 4373 4374 // A dynamic exception specification is canonical if it only contains pack 4375 // expansions (so we can't tell whether it's non-throwing) and all its 4376 // contained types are canonical. 4377 if (ESI.Type == EST_Dynamic) { 4378 bool AnyPackExpansions = false; 4379 for (QualType ET : ESI.Exceptions) { 4380 if (!ET.isCanonical()) 4381 return false; 4382 if (ET->getAs<PackExpansionType>()) 4383 AnyPackExpansions = true; 4384 } 4385 return AnyPackExpansions; 4386 } 4387 4388 return false; 4389 } 4390 4391 QualType ASTContext::getFunctionTypeInternal( 4392 QualType ResultTy, ArrayRef<QualType> ArgArray, 4393 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4394 size_t NumArgs = ArgArray.size(); 4395 4396 // Unique functions, to guarantee there is only one function of a particular 4397 // structure. 4398 llvm::FoldingSetNodeID ID; 4399 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4400 *this, true); 4401 4402 QualType Canonical; 4403 bool Unique = false; 4404 4405 void *InsertPos = nullptr; 4406 if (FunctionProtoType *FPT = 4407 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4408 QualType Existing = QualType(FPT, 0); 4409 4410 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4411 // it so long as our exception specification doesn't contain a dependent 4412 // noexcept expression, or we're just looking for a canonical type. 4413 // Otherwise, we're going to need to create a type 4414 // sugar node to hold the concrete expression. 
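    // (For example, repeated mentions of a dependent 'noexcept(expr)' whose
    // Expr nodes differ each keep their own non-uniqued sugar node over a
    // shared canonical function type.)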
4415 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4416 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4417 return Existing; 4418 4419 // We need a new type sugar node for this one, to hold the new noexcept 4420 // expression. We do no canonicalization here, but that's OK since we don't 4421 // expect to see the same noexcept expression much more than once. 4422 Canonical = getCanonicalType(Existing); 4423 Unique = true; 4424 } 4425 4426 bool NoexceptInType = getLangOpts().CPlusPlus17; 4427 bool IsCanonicalExceptionSpec = 4428 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4429 4430 // Determine whether the type being created is already canonical or not. 4431 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4432 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4433 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4434 if (!ArgArray[i].isCanonicalAsParam()) 4435 isCanonical = false; 4436 4437 if (OnlyWantCanonical) 4438 assert(isCanonical && 4439 "given non-canonical parameters constructing canonical type"); 4440 4441 // If this type isn't canonical, get the canonical version of it if we don't 4442 // already have it. The exception spec is only partially part of the 4443 // canonical type, and only in C++17 onwards. 4444 if (!isCanonical && Canonical.isNull()) { 4445 SmallVector<QualType, 16> CanonicalArgs; 4446 CanonicalArgs.reserve(NumArgs); 4447 for (unsigned i = 0; i != NumArgs; ++i) 4448 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4449 4450 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4451 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4452 CanonicalEPI.HasTrailingReturn = false; 4453 4454 if (IsCanonicalExceptionSpec) { 4455 // Exception spec is already OK. 4456 } else if (NoexceptInType) { 4457 switch (EPI.ExceptionSpec.Type) { 4458 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4459 // We don't know yet. It shouldn't matter what we pick here; no-one 4460 // should ever look at this. 4461 [[fallthrough]]; 4462 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4463 CanonicalEPI.ExceptionSpec.Type = EST_None; 4464 break; 4465 4466 // A dynamic exception specification is almost always "not noexcept", 4467 // with the exception that a pack expansion might expand to no types. 4468 case EST_Dynamic: { 4469 bool AnyPacks = false; 4470 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4471 if (ET->getAs<PackExpansionType>()) 4472 AnyPacks = true; 4473 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4474 } 4475 if (!AnyPacks) 4476 CanonicalEPI.ExceptionSpec.Type = EST_None; 4477 else { 4478 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4479 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4480 } 4481 break; 4482 } 4483 4484 case EST_DynamicNone: 4485 case EST_BasicNoexcept: 4486 case EST_NoexceptTrue: 4487 case EST_NoThrow: 4488 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4489 break; 4490 4491 case EST_DependentNoexcept: 4492 llvm_unreachable("dependent noexcept is already canonical"); 4493 } 4494 } else { 4495 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4496 } 4497 4498 // Adjust the canonical function result type. 4499 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4500 Canonical = 4501 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4502 4503 // Get the new insert position for the node we care about. 
4504 FunctionProtoType *NewIP = 4505 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4506 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4507 } 4508 4509 // Compute the needed size to hold this FunctionProtoType and the 4510 // various trailing objects. 4511 auto ESH = FunctionProtoType::getExceptionSpecSize( 4512 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4513 size_t Size = FunctionProtoType::totalSizeToAlloc< 4514 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4515 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4516 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4517 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4518 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4519 EPI.ExtParameterInfos ? NumArgs : 0, 4520 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4521 4522 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); 4523 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4524 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4525 Types.push_back(FTP); 4526 if (!Unique) 4527 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4528 return QualType(FTP, 0); 4529 } 4530 4531 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4532 llvm::FoldingSetNodeID ID; 4533 PipeType::Profile(ID, T, ReadOnly); 4534 4535 void *InsertPos = nullptr; 4536 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4537 return QualType(PT, 0); 4538 4539 // If the pipe element type isn't canonical, this won't be a canonical type 4540 // either, so fill in the canonical type field. 4541 QualType Canonical; 4542 if (!T.isCanonical()) { 4543 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4544 4545 // Get the new insert position for the node we care about. 4546 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4547 assert(!NewIP && "Shouldn't be in the map!"); 4548 (void)NewIP; 4549 } 4550 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); 4551 Types.push_back(New); 4552 PipeTypes.InsertNode(New, InsertPos); 4553 return QualType(New, 0); 4554 } 4555 4556 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4557 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4558 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4559 : Ty; 4560 } 4561 4562 QualType ASTContext::getReadPipeType(QualType T) const { 4563 return getPipeType(T, true); 4564 } 4565 4566 QualType ASTContext::getWritePipeType(QualType T) const { 4567 return getPipeType(T, false); 4568 } 4569 4570 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4571 llvm::FoldingSetNodeID ID; 4572 BitIntType::Profile(ID, IsUnsigned, NumBits); 4573 4574 void *InsertPos = nullptr; 4575 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4576 return QualType(EIT, 0); 4577 4578 auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits); 4579 BitIntTypes.InsertNode(New, InsertPos); 4580 Types.push_back(New); 4581 return QualType(New, 0); 4582 } 4583 4584 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4585 Expr *NumBitsExpr) const { 4586 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4587 llvm::FoldingSetNodeID ID; 4588 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4589 4590 void *InsertPos = nullptr; 4591 if (DependentBitIntType *Existing = 4592 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4593 return QualType(Existing, 0); 4594 4595 auto *New = new (*this, TypeAlignment) 4596 DependentBitIntType(*this, IsUnsigned, NumBitsExpr); 4597 DependentBitIntTypes.InsertNode(New, InsertPos); 4598 4599 Types.push_back(New); 4600 return QualType(New, 0); 4601 } 4602 4603 #ifndef NDEBUG 4604 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4605 if (!isa<CXXRecordDecl>(D)) return false; 4606 const auto *RD = cast<CXXRecordDecl>(D); 4607 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4608 return true; 4609 if (RD->getDescribedClassTemplate() && 4610 !isa<ClassTemplateSpecializationDecl>(RD)) 4611 return true; 4612 return false; 4613 } 4614 #endif 4615 4616 /// getInjectedClassNameType - Return the unique reference to the 4617 /// injected class name type for the specified templated declaration. 4618 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4619 QualType TST) const { 4620 assert(NeedsInjectedClassNameType(Decl)); 4621 if (Decl->TypeForDecl) { 4622 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4623 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4624 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4625 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4626 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4627 } else { 4628 Type *newType = 4629 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 4630 Decl->TypeForDecl = newType; 4631 Types.push_back(newType); 4632 } 4633 return QualType(Decl->TypeForDecl, 0); 4634 } 4635 4636 /// getTypeDeclType - Return the unique reference to the type for the 4637 /// specified type declaration. 
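/// (This is the out-of-line slow path; the inline getTypeDeclType fast path in
/// the header returns Decl->TypeForDecl directly once it has been computed.
/// For instance, the first query of a 'typedef int my_int;' declaration lands
/// here and dispatches to getTypedefType.)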
4638 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4639 assert(Decl && "Passed null for Decl param"); 4640 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4641 4642 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4643 return getTypedefType(Typedef); 4644 4645 assert(!isa<TemplateTypeParmDecl>(Decl) && 4646 "Template type parameter types are always available."); 4647 4648 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4649 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4650 assert(!NeedsInjectedClassNameType(Record)); 4651 return getRecordType(Record); 4652 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4653 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4654 return getEnumType(Enum); 4655 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4656 return getUnresolvedUsingType(Using); 4657 } else 4658 llvm_unreachable("TypeDecl without a type?"); 4659 4660 return QualType(Decl->TypeForDecl, 0); 4661 } 4662 4663 /// getTypedefType - Return the unique reference to the type for the 4664 /// specified typedef name decl. 4665 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4666 QualType Underlying) const { 4667 if (!Decl->TypeForDecl) { 4668 if (Underlying.isNull()) 4669 Underlying = Decl->getUnderlyingType(); 4670 auto *NewType = new (*this, TypeAlignment) TypedefType( 4671 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying)); 4672 Decl->TypeForDecl = NewType; 4673 Types.push_back(NewType); 4674 return QualType(NewType, 0); 4675 } 4676 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying) 4677 return QualType(Decl->TypeForDecl, 0); 4678 assert(hasSameType(Decl->getUnderlyingType(), Underlying)); 4679 4680 llvm::FoldingSetNodeID ID; 4681 TypedefType::Profile(ID, Decl, Underlying); 4682 4683 void *InsertPos = nullptr; 4684 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4685 assert(!T->typeMatchesDecl() && 4686 "non-divergent case should be handled with TypeDecl"); 4687 return QualType(T, 0); 4688 } 4689 4690 void *Mem = 4691 Allocate(TypedefType::totalSizeToAlloc<QualType>(true), TypeAlignment); 4692 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying, 4693 getCanonicalType(Underlying)); 4694 TypedefTypes.InsertNode(NewType, InsertPos); 4695 Types.push_back(NewType); 4696 return QualType(NewType, 0); 4697 } 4698 4699 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4700 QualType Underlying) const { 4701 llvm::FoldingSetNodeID ID; 4702 UsingType::Profile(ID, Found, Underlying); 4703 4704 void *InsertPos = nullptr; 4705 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos)) 4706 return QualType(T, 0); 4707 4708 const Type *TypeForDecl = 4709 cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(); 4710 4711 assert(!Underlying.hasLocalQualifiers()); 4712 QualType Canon = Underlying->getCanonicalTypeInternal(); 4713 assert(TypeForDecl->getCanonicalTypeInternal() == Canon); 4714 4715 if (Underlying.getTypePtr() == TypeForDecl) 4716 Underlying = QualType(); 4717 void *Mem = 4718 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()), 4719 TypeAlignment); 4720 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon); 4721 Types.push_back(NewType); 4722 UsingTypes.InsertNode(NewType, InsertPos); 4723 return QualType(NewType, 0); 4724 } 4725 4726 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 4727 if 
(Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4728 4729 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4730 if (PrevDecl->TypeForDecl) 4731 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4732 4733 auto *newType = new (*this, TypeAlignment) RecordType(Decl); 4734 Decl->TypeForDecl = newType; 4735 Types.push_back(newType); 4736 return QualType(newType, 0); 4737 } 4738 4739 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4740 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4741 4742 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4743 if (PrevDecl->TypeForDecl) 4744 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4745 4746 auto *newType = new (*this, TypeAlignment) EnumType(Decl); 4747 Decl->TypeForDecl = newType; 4748 Types.push_back(newType); 4749 return QualType(newType, 0); 4750 } 4751 4752 QualType ASTContext::getUnresolvedUsingType( 4753 const UnresolvedUsingTypenameDecl *Decl) const { 4754 if (Decl->TypeForDecl) 4755 return QualType(Decl->TypeForDecl, 0); 4756 4757 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4758 Decl->getCanonicalDecl()) 4759 if (CanonicalDecl->TypeForDecl) 4760 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4761 4762 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl); 4763 Decl->TypeForDecl = newType; 4764 Types.push_back(newType); 4765 return QualType(newType, 0); 4766 } 4767 4768 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4769 QualType modifiedType, 4770 QualType equivalentType) const { 4771 llvm::FoldingSetNodeID id; 4772 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4773 4774 void *insertPos = nullptr; 4775 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4776 if (type) return QualType(type, 0); 4777 4778 QualType canon = getCanonicalType(equivalentType); 4779 type = new (*this, TypeAlignment) 4780 AttributedType(canon, attrKind, modifiedType, equivalentType); 4781 4782 Types.push_back(type); 4783 AttributedTypes.InsertNode(type, insertPos); 4784 4785 return QualType(type, 0); 4786 } 4787 4788 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4789 QualType Wrapped) { 4790 llvm::FoldingSetNodeID ID; 4791 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4792 4793 void *InsertPos = nullptr; 4794 BTFTagAttributedType *Ty = 4795 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4796 if (Ty) 4797 return QualType(Ty, 0); 4798 4799 QualType Canon = getCanonicalType(Wrapped); 4800 Ty = new (*this, TypeAlignment) BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4801 4802 Types.push_back(Ty); 4803 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4804 4805 return QualType(Ty, 0); 4806 } 4807 4808 /// Retrieve a substitution-result type. 
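/// A SubstTemplateTypeParmType is sugar recording that \p Replacement was
/// substituted for the template type parameter at index \p Index of
/// \p AssociatedDecl; it canonicalizes to the canonical form of the
/// replacement type. For instance, after instantiating
/// \code
///   template <typename T> using Ref = T &;
/// \endcode
/// with T = int, the substituted 'int' is typically represented with this
/// sugar.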
4809 QualType ASTContext::getSubstTemplateTypeParmType( 4810 QualType Replacement, Decl *AssociatedDecl, unsigned Index, 4811 std::optional<unsigned> PackIndex) const { 4812 llvm::FoldingSetNodeID ID; 4813 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, 4814 PackIndex); 4815 void *InsertPos = nullptr; 4816 SubstTemplateTypeParmType *SubstParm = 4817 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4818 4819 if (!SubstParm) { 4820 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( 4821 !Replacement.isCanonical()), 4822 TypeAlignment); 4823 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, 4824 Index, PackIndex); 4825 Types.push_back(SubstParm); 4826 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 4827 } 4828 4829 return QualType(SubstParm, 0); 4830 } 4831 4832 /// Retrieve a 4833 QualType 4834 ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, 4835 unsigned Index, bool Final, 4836 const TemplateArgument &ArgPack) { 4837 #ifndef NDEBUG 4838 for (const auto &P : ArgPack.pack_elements()) 4839 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); 4840 #endif 4841 4842 llvm::FoldingSetNodeID ID; 4843 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, 4844 ArgPack); 4845 void *InsertPos = nullptr; 4846 if (SubstTemplateTypeParmPackType *SubstParm = 4847 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 4848 return QualType(SubstParm, 0); 4849 4850 QualType Canon; 4851 { 4852 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); 4853 if (!AssociatedDecl->isCanonicalDecl() || 4854 !CanonArgPack.structurallyEquals(ArgPack)) { 4855 Canon = getSubstTemplateTypeParmPackType( 4856 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); 4857 [[maybe_unused]] const auto *Nothing = 4858 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 4859 assert(!Nothing); 4860 } 4861 } 4862 4863 auto *SubstParm = new (*this, TypeAlignment) SubstTemplateTypeParmPackType( 4864 Canon, AssociatedDecl, Index, Final, ArgPack); 4865 Types.push_back(SubstParm); 4866 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 4867 return QualType(SubstParm, 0); 4868 } 4869 4870 /// Retrieve the template type parameter type for a template 4871 /// parameter or parameter pack with the given depth, index, and (optionally) 4872 /// name. 
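/// For example, in
/// \code
///   template <typename T> struct S;
/// \endcode
/// the type of 'T' inside 'S' is a TemplateTypeParmType with depth 0,
/// index 0, and ParameterPack == false.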
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);

    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}

TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                              const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST =
      getTemplateSpecializationType(Name, Args.arguments(), Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgumentLoc> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args)
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}

#ifndef NDEBUG
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  return false;
}
#endif

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = QTN->getUnderlyingTemplate();

  const auto *TD = Template.getAsTemplateDecl();
  bool IsTypeAlias = TD && TD->isTypeAlias();
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
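    // In that case the aliased type cannot be computed until the pack is
    // expanded, so fall through and treat this as an ordinary (dependent)
    // template specialization.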
4968 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4969 "Caller must compute aliased type"); 4970 IsTypeAlias = false; 4971 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4972 } 4973 4974 // Allocate the (non-canonical) template specialization type, but don't 4975 // try to unique it: these types typically have location information that 4976 // we don't unique and don't want to lose. 4977 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4978 sizeof(TemplateArgument) * Args.size() + 4979 (IsTypeAlias? sizeof(QualType) : 0), 4980 TypeAlignment); 4981 auto *Spec 4982 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4983 IsTypeAlias ? Underlying : QualType()); 4984 4985 Types.push_back(Spec); 4986 return QualType(Spec, 0); 4987 } 4988 4989 QualType ASTContext::getCanonicalTemplateSpecializationType( 4990 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4991 assert(!Template.getAsDependentTemplateName() && 4992 "No dependent template names here!"); 4993 4994 // Look through qualified template names. 4995 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4996 Template = TemplateName(QTN->getUnderlyingTemplate()); 4997 4998 // Build the canonical template specialization type. 4999 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 5000 bool AnyNonCanonArgs = false; 5001 auto CanonArgs = 5002 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5003 5004 // Determine whether this canonical template specialization type already 5005 // exists. 5006 llvm::FoldingSetNodeID ID; 5007 TemplateSpecializationType::Profile(ID, CanonTemplate, 5008 CanonArgs, *this); 5009 5010 void *InsertPos = nullptr; 5011 TemplateSpecializationType *Spec 5012 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5013 5014 if (!Spec) { 5015 // Allocate a new canonical template specialization type. 
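    // Unlike the sugared form built by getTemplateSpecializationType, the
    // canonical node carries no as-written information, so it can safely be
    // uniqued in the folding set.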
5016 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 5017 sizeof(TemplateArgument) * CanonArgs.size()), 5018 TypeAlignment); 5019 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 5020 CanonArgs, 5021 QualType(), QualType()); 5022 Types.push_back(Spec); 5023 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 5024 } 5025 5026 assert(Spec->isDependentType() && 5027 "Non-dependent template-id type must have a canonical type"); 5028 return QualType(Spec, 0); 5029 } 5030 5031 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5032 NestedNameSpecifier *NNS, 5033 QualType NamedType, 5034 TagDecl *OwnedTagDecl) const { 5035 llvm::FoldingSetNodeID ID; 5036 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5037 5038 void *InsertPos = nullptr; 5039 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5040 if (T) 5041 return QualType(T, 0); 5042 5043 QualType Canon = NamedType; 5044 if (!Canon.isCanonical()) { 5045 Canon = getCanonicalType(NamedType); 5046 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5047 assert(!CheckT && "Elaborated canonical type broken"); 5048 (void)CheckT; 5049 } 5050 5051 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5052 TypeAlignment); 5053 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5054 5055 Types.push_back(T); 5056 ElaboratedTypes.InsertNode(T, InsertPos); 5057 return QualType(T, 0); 5058 } 5059 5060 QualType 5061 ASTContext::getParenType(QualType InnerType) const { 5062 llvm::FoldingSetNodeID ID; 5063 ParenType::Profile(ID, InnerType); 5064 5065 void *InsertPos = nullptr; 5066 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5067 if (T) 5068 return QualType(T, 0); 5069 5070 QualType Canon = InnerType; 5071 if (!Canon.isCanonical()) { 5072 Canon = getCanonicalType(InnerType); 5073 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5074 assert(!CheckT && "Paren canonical type broken"); 5075 (void)CheckT; 5076 } 5077 5078 T = new (*this, TypeAlignment) ParenType(InnerType, Canon); 5079 Types.push_back(T); 5080 ParenTypes.InsertNode(T, InsertPos); 5081 return QualType(T, 0); 5082 } 5083 5084 QualType 5085 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5086 const IdentifierInfo *MacroII) const { 5087 QualType Canon = UnderlyingTy; 5088 if (!Canon.isCanonical()) 5089 Canon = getCanonicalType(UnderlyingTy); 5090 5091 auto *newType = new (*this, TypeAlignment) 5092 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5093 Types.push_back(newType); 5094 return QualType(newType, 0); 5095 } 5096 5097 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5098 NestedNameSpecifier *NNS, 5099 const IdentifierInfo *Name, 5100 QualType Canon) const { 5101 if (Canon.isNull()) { 5102 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5103 if (CanonNNS != NNS) 5104 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5105 } 5106 5107 llvm::FoldingSetNodeID ID; 5108 DependentNameType::Profile(ID, Keyword, NNS, Name); 5109 5110 void *InsertPos = nullptr; 5111 DependentNameType *T 5112 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5113 if (T) 5114 return QualType(T, 0); 5115 5116 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); 5117 Types.push_back(T); 5118 DependentNameTypes.InsertNode(T, InsertPos); 5119 return QualType(T, 0); 5120 } 5121 5122 QualType 
ASTContext::getDependentTemplateSpecializationType( 5123 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5124 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5125 // TODO: avoid this copy 5126 SmallVector<TemplateArgument, 16> ArgCopy; 5127 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5128 ArgCopy.push_back(Args[I].getArgument()); 5129 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5130 } 5131 5132 QualType 5133 ASTContext::getDependentTemplateSpecializationType( 5134 ElaboratedTypeKeyword Keyword, 5135 NestedNameSpecifier *NNS, 5136 const IdentifierInfo *Name, 5137 ArrayRef<TemplateArgument> Args) const { 5138 assert((!NNS || NNS->isDependent()) && 5139 "nested-name-specifier must be dependent"); 5140 5141 llvm::FoldingSetNodeID ID; 5142 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5143 Name, Args); 5144 5145 void *InsertPos = nullptr; 5146 DependentTemplateSpecializationType *T 5147 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5148 if (T) 5149 return QualType(T, 0); 5150 5151 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5152 5153 ElaboratedTypeKeyword CanonKeyword = Keyword; 5154 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 5155 5156 bool AnyNonCanonArgs = false; 5157 auto CanonArgs = 5158 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5159 5160 QualType Canon; 5161 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5162 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5163 Name, 5164 CanonArgs); 5165 5166 // Find the insert position again. 5167 [[maybe_unused]] auto *Nothing = 5168 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5169 assert(!Nothing && "canonical type broken"); 5170 } 5171 5172 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5173 sizeof(TemplateArgument) * Args.size()), 5174 TypeAlignment); 5175 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5176 Name, Args, Canon); 5177 Types.push_back(T); 5178 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5179 return QualType(T, 0); 5180 } 5181 5182 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5183 TemplateArgument Arg; 5184 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5185 QualType ArgType = getTypeDeclType(TTP); 5186 if (TTP->isParameterPack()) 5187 ArgType = getPackExpansionType(ArgType, std::nullopt); 5188 5189 Arg = TemplateArgument(ArgType); 5190 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5191 QualType T = 5192 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5193 // For class NTTPs, ensure we include the 'const' so the type matches that 5194 // of a real template argument. 5195 // FIXME: It would be more faithful to model this as something like an 5196 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
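    // For instance, with a C++20 class-type parameter 'template <Widget W>'
    // ('Widget' is just an illustrative name), an id-expression naming 'W'
    // denotes a const template parameter object, so the reference built below
    // must likewise have type 'const Widget'.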
5197 if (T->isRecordType()) 5198 T.addConst(); 5199 Expr *E = new (*this) DeclRefExpr( 5200 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5201 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5202 5203 if (NTTP->isParameterPack()) 5204 E = new (*this) 5205 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5206 Arg = TemplateArgument(E); 5207 } else { 5208 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5209 if (TTP->isParameterPack()) 5210 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); 5211 else 5212 Arg = TemplateArgument(TemplateName(TTP)); 5213 } 5214 5215 if (Param->isTemplateParameterPack()) 5216 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5217 5218 return Arg; 5219 } 5220 5221 void 5222 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5223 SmallVectorImpl<TemplateArgument> &Args) { 5224 Args.reserve(Args.size() + Params->size()); 5225 5226 for (NamedDecl *Param : *Params) 5227 Args.push_back(getInjectedTemplateArg(Param)); 5228 } 5229 5230 QualType ASTContext::getPackExpansionType(QualType Pattern, 5231 std::optional<unsigned> NumExpansions, 5232 bool ExpectPackInType) { 5233 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5234 "Pack expansions must expand one or more parameter packs"); 5235 5236 llvm::FoldingSetNodeID ID; 5237 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5238 5239 void *InsertPos = nullptr; 5240 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5241 if (T) 5242 return QualType(T, 0); 5243 5244 QualType Canon; 5245 if (!Pattern.isCanonical()) { 5246 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5247 /*ExpectPackInType=*/false); 5248 5249 // Find the insert position again, in case we inserted an element into 5250 // PackExpansionTypes and invalidated our insert position. 5251 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5252 } 5253 5254 T = new (*this, TypeAlignment) 5255 PackExpansionType(Pattern, Canon, NumExpansions); 5256 Types.push_back(T); 5257 PackExpansionTypes.InsertNode(T, InsertPos); 5258 return QualType(T, 0); 5259 } 5260 5261 /// CmpProtocolNames - Comparison predicate for sorting protocols 5262 /// alphabetically. 5263 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5264 ObjCProtocolDecl *const *RHS) { 5265 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5266 } 5267 5268 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5269 if (Protocols.empty()) return true; 5270 5271 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5272 return false; 5273 5274 for (unsigned i = 1; i != Protocols.size(); ++i) 5275 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5276 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5277 return false; 5278 return true; 5279 } 5280 5281 static void 5282 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5283 // Sort protocols, keyed by name. 5284 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5285 5286 // Canonicalize. 5287 for (ObjCProtocolDecl *&P : Protocols) 5288 P = P->getCanonicalDecl(); 5289 5290 // Remove duplicates. 
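  // (std::unique only returns the new logical end of the unique range; the
  // erase() below actually removes the leftover tail from the vector.)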
5291 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5292 Protocols.erase(ProtocolsEnd, Protocols.end()); 5293 } 5294 5295 QualType ASTContext::getObjCObjectType(QualType BaseType, 5296 ObjCProtocolDecl * const *Protocols, 5297 unsigned NumProtocols) const { 5298 return getObjCObjectType(BaseType, {}, 5299 llvm::ArrayRef(Protocols, NumProtocols), 5300 /*isKindOf=*/false); 5301 } 5302 5303 QualType ASTContext::getObjCObjectType( 5304 QualType baseType, 5305 ArrayRef<QualType> typeArgs, 5306 ArrayRef<ObjCProtocolDecl *> protocols, 5307 bool isKindOf) const { 5308 // If the base type is an interface and there aren't any protocols or 5309 // type arguments to add, then the interface type will do just fine. 5310 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5311 isa<ObjCInterfaceType>(baseType)) 5312 return baseType; 5313 5314 // Look in the folding set for an existing type. 5315 llvm::FoldingSetNodeID ID; 5316 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5317 void *InsertPos = nullptr; 5318 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5319 return QualType(QT, 0); 5320 5321 // Determine the type arguments to be used for canonicalization, 5322 // which may be explicitly specified here or written on the base 5323 // type. 5324 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5325 if (effectiveTypeArgs.empty()) { 5326 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5327 effectiveTypeArgs = baseObject->getTypeArgs(); 5328 } 5329 5330 // Build the canonical type, which has the canonical base type and a 5331 // sorted-and-uniqued list of protocols and the type arguments 5332 // canonicalized. 5333 QualType canonical; 5334 bool typeArgsAreCanonical = llvm::all_of( 5335 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5336 bool protocolsSorted = areSortedAndUniqued(protocols); 5337 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5338 // Determine the canonical type arguments. 5339 ArrayRef<QualType> canonTypeArgs; 5340 SmallVector<QualType, 4> canonTypeArgsVec; 5341 if (!typeArgsAreCanonical) { 5342 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5343 for (auto typeArg : effectiveTypeArgs) 5344 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5345 canonTypeArgs = canonTypeArgsVec; 5346 } else { 5347 canonTypeArgs = effectiveTypeArgs; 5348 } 5349 5350 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5351 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5352 if (!protocolsSorted) { 5353 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5354 SortAndUniqueProtocols(canonProtocolsVec); 5355 canonProtocols = canonProtocolsVec; 5356 } else { 5357 canonProtocols = protocols; 5358 } 5359 5360 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5361 canonProtocols, isKindOf); 5362 5363 // Regenerate InsertPos. 5364 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5365 } 5366 5367 unsigned size = sizeof(ObjCObjectTypeImpl); 5368 size += typeArgs.size() * sizeof(QualType); 5369 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5370 void *mem = Allocate(size, TypeAlignment); 5371 auto *T = 5372 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5373 isKindOf); 5374 5375 Types.push_back(T); 5376 ObjCObjectTypes.InsertNode(T, InsertPos); 5377 return QualType(T, 0); 5378 } 5379 5380 /// Apply Objective-C protocol qualifiers to the given type. 
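/// On failure (i.e. when \p type cannot be protocol-qualified at all),
/// \p hasError is set and the type is returned unchanged.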
5381 /// If this is for the canonical type of a type parameter, we can apply 5382 /// protocol qualifiers on the ObjCObjectPointerType. 5383 QualType 5384 ASTContext::applyObjCProtocolQualifiers(QualType type, 5385 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5386 bool allowOnPointerType) const { 5387 hasError = false; 5388 5389 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5390 return getObjCTypeParamType(objT->getDecl(), protocols); 5391 } 5392 5393 // Apply protocol qualifiers to ObjCObjectPointerType. 5394 if (allowOnPointerType) { 5395 if (const auto *objPtr = 5396 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5397 const ObjCObjectType *objT = objPtr->getObjectType(); 5398 // Merge protocol lists and construct ObjCObjectType. 5399 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5400 protocolsVec.append(objT->qual_begin(), 5401 objT->qual_end()); 5402 protocolsVec.append(protocols.begin(), protocols.end()); 5403 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5404 type = getObjCObjectType( 5405 objT->getBaseType(), 5406 objT->getTypeArgsAsWritten(), 5407 protocols, 5408 objT->isKindOfTypeAsWritten()); 5409 return getObjCObjectPointerType(type); 5410 } 5411 } 5412 5413 // Apply protocol qualifiers to ObjCObjectType. 5414 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5415 // FIXME: Check for protocols to which the class type is already 5416 // known to conform. 5417 5418 return getObjCObjectType(objT->getBaseType(), 5419 objT->getTypeArgsAsWritten(), 5420 protocols, 5421 objT->isKindOfTypeAsWritten()); 5422 } 5423 5424 // If the canonical type is ObjCObjectType, ... 5425 if (type->isObjCObjectType()) { 5426 // Silently overwrite any existing protocol qualifiers. 5427 // TODO: determine whether that's the right thing to do. 5428 5429 // FIXME: Check for protocols to which the class type is already 5430 // known to conform. 5431 return getObjCObjectType(type, {}, protocols, false); 5432 } 5433 5434 // id<protocol-list> 5435 if (type->isObjCIdType()) { 5436 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5437 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5438 objPtr->isKindOfType()); 5439 return getObjCObjectPointerType(type); 5440 } 5441 5442 // Class<protocol-list> 5443 if (type->isObjCClassType()) { 5444 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5445 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5446 objPtr->isKindOfType()); 5447 return getObjCObjectPointerType(type); 5448 } 5449 5450 hasError = true; 5451 return type; 5452 } 5453 5454 QualType 5455 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5456 ArrayRef<ObjCProtocolDecl *> protocols) const { 5457 // Look in the folding set for an existing type. 5458 llvm::FoldingSetNodeID ID; 5459 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5460 void *InsertPos = nullptr; 5461 if (ObjCTypeParamType *TypeParam = 5462 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5463 return QualType(TypeParam, 0); 5464 5465 // We canonicalize to the underlying type. 5466 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5467 if (!protocols.empty()) { 5468 // Apply the protocol qualifers. 
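    // They are applied to the canonical underlying type, so the canonical
    // type of this ObjCTypeParamType is the protocol-qualified bound itself.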
5469 bool hasError; 5470 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5471 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5472 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5473 } 5474 5475 unsigned size = sizeof(ObjCTypeParamType); 5476 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5477 void *mem = Allocate(size, TypeAlignment); 5478 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5479 5480 Types.push_back(newType); 5481 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5482 return QualType(newType, 0); 5483 } 5484 5485 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5486 ObjCTypeParamDecl *New) const { 5487 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5488 // Update TypeForDecl after updating TypeSourceInfo. 5489 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5490 SmallVector<ObjCProtocolDecl *, 8> protocols; 5491 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5492 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5493 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5494 } 5495 5496 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5497 /// protocol list adopt all protocols in QT's qualified-id protocol 5498 /// list. 5499 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5500 ObjCInterfaceDecl *IC) { 5501 if (!QT->isObjCQualifiedIdType()) 5502 return false; 5503 5504 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5505 // If both the right and left sides have qualifiers. 5506 for (auto *Proto : OPT->quals()) { 5507 if (!IC->ClassImplementsProtocol(Proto, false)) 5508 return false; 5509 } 5510 return true; 5511 } 5512 return false; 5513 } 5514 5515 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5516 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5517 /// of protocols. 5518 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5519 ObjCInterfaceDecl *IDecl) { 5520 if (!QT->isObjCQualifiedIdType()) 5521 return false; 5522 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5523 if (!OPT) 5524 return false; 5525 if (!IDecl->hasDefinition()) 5526 return false; 5527 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5528 CollectInheritedProtocols(IDecl, InheritedProtocols); 5529 if (InheritedProtocols.empty()) 5530 return false; 5531 // Check that if every protocol in list of id<plist> conforms to a protocol 5532 // of IDecl's, then bridge casting is ok. 5533 bool Conforms = false; 5534 for (auto *Proto : OPT->quals()) { 5535 Conforms = false; 5536 for (auto *PI : InheritedProtocols) { 5537 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5538 Conforms = true; 5539 break; 5540 } 5541 } 5542 if (!Conforms) 5543 break; 5544 } 5545 if (Conforms) 5546 return true; 5547 5548 for (auto *PI : InheritedProtocols) { 5549 // If both the right and left sides have qualifiers. 5550 bool Adopts = false; 5551 for (auto *Proto : OPT->quals()) { 5552 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5553 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5554 break; 5555 } 5556 if (!Adopts) 5557 return false; 5558 } 5559 return true; 5560 } 5561 5562 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5563 /// the given object type. 
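/// For instance, the type of a variable declared as
/// \code
///   NSString *S;
/// \endcode
/// is an ObjCObjectPointerType whose pointee is the ObjCInterfaceType for
/// 'NSString'.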
5564 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5565 llvm::FoldingSetNodeID ID; 5566 ObjCObjectPointerType::Profile(ID, ObjectT); 5567 5568 void *InsertPos = nullptr; 5569 if (ObjCObjectPointerType *QT = 5570 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5571 return QualType(QT, 0); 5572 5573 // Find the canonical object type. 5574 QualType Canonical; 5575 if (!ObjectT.isCanonical()) { 5576 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5577 5578 // Regenerate InsertPos. 5579 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5580 } 5581 5582 // No match. 5583 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); 5584 auto *QType = 5585 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5586 5587 Types.push_back(QType); 5588 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5589 return QualType(QType, 0); 5590 } 5591 5592 /// getObjCInterfaceType - Return the unique reference to the type for the 5593 /// specified ObjC interface decl. The list of protocols is optional. 5594 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5595 ObjCInterfaceDecl *PrevDecl) const { 5596 if (Decl->TypeForDecl) 5597 return QualType(Decl->TypeForDecl, 0); 5598 5599 if (PrevDecl) { 5600 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5601 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5602 return QualType(PrevDecl->TypeForDecl, 0); 5603 } 5604 5605 // Prefer the definition, if there is one. 5606 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5607 Decl = Def; 5608 5609 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); 5610 auto *T = new (Mem) ObjCInterfaceType(Decl); 5611 Decl->TypeForDecl = T; 5612 Types.push_back(T); 5613 return QualType(T, 0); 5614 } 5615 5616 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5617 /// TypeOfExprType AST's (since expression's are never shared). For example, 5618 /// multiple declarations that refer to "typeof(x)" all contain different 5619 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5620 /// on canonical type's (which are always unique). 5621 QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { 5622 TypeOfExprType *toe; 5623 if (tofExpr->isTypeDependent()) { 5624 llvm::FoldingSetNodeID ID; 5625 DependentTypeOfExprType::Profile(ID, *this, tofExpr, 5626 Kind == TypeOfKind::Unqualified); 5627 5628 void *InsertPos = nullptr; 5629 DependentTypeOfExprType *Canon = 5630 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5631 if (Canon) { 5632 // We already have a "canonical" version of an identical, dependent 5633 // typeof(expr) type. Use that as our canonical type. 5634 toe = new (*this, TypeAlignment) 5635 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); 5636 } else { 5637 // Build a new, canonical typeof(expr) type. 5638 Canon = new (*this, TypeAlignment) 5639 DependentTypeOfExprType(*this, tofExpr, Kind); 5640 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5641 toe = Canon; 5642 } 5643 } else { 5644 QualType Canonical = getCanonicalType(tofExpr->getType()); 5645 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Kind, Canonical); 5646 } 5647 Types.push_back(toe); 5648 return QualType(toe, 0); 5649 } 5650 5651 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5652 /// TypeOfType nodes. The only motivation to unique these nodes would be 5653 /// memory savings. 
Since typeof(t) is fairly uncommon, space shouldn't be 5654 /// an issue. This doesn't affect the type checker, since it operates 5655 /// on canonical types (which are always unique). 5656 QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { 5657 QualType Canonical = getCanonicalType(tofType); 5658 auto *tot = 5659 new (*this, TypeAlignment) TypeOfType(tofType, Canonical, Kind); 5660 Types.push_back(tot); 5661 return QualType(tot, 0); 5662 } 5663 5664 /// getReferenceQualifiedType - Given an expr, will return the type for 5665 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5666 /// and class member access into account. 5667 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5668 // C++11 [dcl.type.simple]p4: 5669 // [...] 5670 QualType T = E->getType(); 5671 switch (E->getValueKind()) { 5672 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5673 // type of e; 5674 case VK_XValue: 5675 return getRValueReferenceType(T); 5676 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5677 // type of e; 5678 case VK_LValue: 5679 return getLValueReferenceType(T); 5680 // - otherwise, decltype(e) is the type of e. 5681 case VK_PRValue: 5682 return T; 5683 } 5684 llvm_unreachable("Unknown value kind"); 5685 } 5686 5687 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5688 /// nodes. This would never be helpful, since each such type has its own 5689 /// expression, and would not give a significant memory saving, since there 5690 /// is an Expr tree under each such type. 5691 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5692 DecltypeType *dt; 5693 5694 // C++11 [temp.type]p2: 5695 // If an expression e involves a template parameter, decltype(e) denotes a 5696 // unique dependent type. Two such decltype-specifiers refer to the same 5697 // type only if their expressions are equivalent (14.5.6.1). 5698 if (e->isInstantiationDependent()) { 5699 llvm::FoldingSetNodeID ID; 5700 DependentDecltypeType::Profile(ID, *this, e); 5701 5702 void *InsertPos = nullptr; 5703 DependentDecltypeType *Canon 5704 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5705 if (!Canon) { 5706 // Build a new, canonical decltype(expr) type. 5707 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); 5708 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5709 } 5710 dt = new (*this, TypeAlignment) 5711 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5712 } else { 5713 dt = new (*this, TypeAlignment) 5714 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5715 } 5716 Types.push_back(dt); 5717 return QualType(dt, 0); 5718 } 5719 5720 /// getUnaryTransformationType - We don't unique these, since the memory 5721 /// savings are minimal and these are rare. 5722 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5723 QualType UnderlyingType, 5724 UnaryTransformType::UTTKind Kind) 5725 const { 5726 UnaryTransformType *ut = nullptr; 5727 5728 if (BaseType->isDependentType()) { 5729 // Look in the folding set for an existing type. 5730 llvm::FoldingSetNodeID ID; 5731 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5732 5733 void *InsertPos = nullptr; 5734 DependentUnaryTransformType *Canon 5735 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5736 5737 if (!Canon) { 5738 // Build a new, canonical __underlying_type(type) type. 
5739 Canon = new (*this, TypeAlignment) 5740 DependentUnaryTransformType(*this, getCanonicalType(BaseType), 5741 Kind); 5742 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5743 } 5744 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5745 QualType(), Kind, 5746 QualType(Canon, 0)); 5747 } else { 5748 QualType CanonType = getCanonicalType(UnderlyingType); 5749 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5750 UnderlyingType, Kind, 5751 CanonType); 5752 } 5753 Types.push_back(ut); 5754 return QualType(ut, 0); 5755 } 5756 5757 QualType ASTContext::getAutoTypeInternal( 5758 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5759 bool IsPack, ConceptDecl *TypeConstraintConcept, 5760 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5761 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5762 !TypeConstraintConcept && !IsDependent) 5763 return getAutoDeductType(); 5764 5765 // Look in the folding set for an existing type. 5766 void *InsertPos = nullptr; 5767 llvm::FoldingSetNodeID ID; 5768 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5769 TypeConstraintConcept, TypeConstraintArgs); 5770 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5771 return QualType(AT, 0); 5772 5773 QualType Canon; 5774 if (!IsCanon) { 5775 if (!DeducedType.isNull()) { 5776 Canon = DeducedType.getCanonicalType(); 5777 } else if (TypeConstraintConcept) { 5778 Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5779 nullptr, {}, true); 5780 // Find the insert position again. 5781 [[maybe_unused]] auto *Nothing = 5782 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5783 assert(!Nothing && "canonical type broken"); 5784 } 5785 } 5786 5787 void *Mem = Allocate(sizeof(AutoType) + 5788 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5789 TypeAlignment); 5790 auto *AT = new (Mem) AutoType( 5791 DeducedType, Keyword, 5792 (IsDependent ? TypeDependence::DependentInstantiation 5793 : TypeDependence::None) | 5794 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5795 Canon, TypeConstraintConcept, TypeConstraintArgs); 5796 Types.push_back(AT); 5797 AutoTypes.InsertNode(AT, InsertPos); 5798 return QualType(AT, 0); 5799 } 5800 5801 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5802 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5803 /// canonical deduced-but-dependent 'auto' type. 5804 QualType 5805 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5806 bool IsDependent, bool IsPack, 5807 ConceptDecl *TypeConstraintConcept, 5808 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5809 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5810 assert((!IsDependent || DeducedType.isNull()) && 5811 "A dependent auto should be undeduced"); 5812 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5813 TypeConstraintConcept, TypeConstraintArgs); 5814 } 5815 5816 /// Return the uniqued reference to the deduced template specialization type 5817 /// which has been deduced to the given type, or to the canonical undeduced 5818 /// such type, or the canonical deduced-but-dependent such type. 5819 QualType ASTContext::getDeducedTemplateSpecializationType( 5820 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5821 // Look in the folding set for an existing type. 
5822 void *InsertPos = nullptr; 5823 llvm::FoldingSetNodeID ID; 5824 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5825 IsDependent); 5826 if (DeducedTemplateSpecializationType *DTST = 5827 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5828 return QualType(DTST, 0); 5829 5830 auto *DTST = new (*this, TypeAlignment) 5831 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5832 llvm::FoldingSetNodeID TempID; 5833 DTST->Profile(TempID); 5834 assert(ID == TempID && "ID does not match"); 5835 Types.push_back(DTST); 5836 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5837 return QualType(DTST, 0); 5838 } 5839 5840 /// getAtomicType - Return the uniqued reference to the atomic type for 5841 /// the given value type. 5842 QualType ASTContext::getAtomicType(QualType T) const { 5843 // Unique pointers, to guarantee there is only one pointer of a particular 5844 // structure. 5845 llvm::FoldingSetNodeID ID; 5846 AtomicType::Profile(ID, T); 5847 5848 void *InsertPos = nullptr; 5849 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5850 return QualType(AT, 0); 5851 5852 // If the atomic value type isn't canonical, this won't be a canonical type 5853 // either, so fill in the canonical type field. 5854 QualType Canonical; 5855 if (!T.isCanonical()) { 5856 Canonical = getAtomicType(getCanonicalType(T)); 5857 5858 // Get the new insert position for the node we care about. 5859 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5860 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5861 } 5862 auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 5863 Types.push_back(New); 5864 AtomicTypes.InsertNode(New, InsertPos); 5865 return QualType(New, 0); 5866 } 5867 5868 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5869 QualType ASTContext::getAutoDeductType() const { 5870 if (AutoDeductTy.isNull()) 5871 AutoDeductTy = QualType(new (*this, TypeAlignment) 5872 AutoType(QualType(), AutoTypeKeyword::Auto, 5873 TypeDependence::None, QualType(), 5874 /*concept*/ nullptr, /*args*/ {}), 5875 0); 5876 return AutoDeductTy; 5877 } 5878 5879 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5880 QualType ASTContext::getAutoRRefDeductType() const { 5881 if (AutoRRefDeductTy.isNull()) 5882 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5883 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5884 return AutoRRefDeductTy; 5885 } 5886 5887 /// getTagDeclType - Return the unique reference to the type for the 5888 /// specified TagDecl (struct/union/class/enum) decl. 5889 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5890 assert(Decl); 5891 // FIXME: What is the design on getTagDeclType when it requires casting 5892 // away const? mutable? 5893 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5894 } 5895 5896 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5897 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5898 /// needs to agree with the definition in <stddef.h>. 5899 CanQualType ASTContext::getSizeType() const { 5900 return getFromTargetType(Target->getSizeType()); 5901 } 5902 5903 /// Return the unique signed counterpart of the integer type 5904 /// corresponding to size_t. 
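/// The C standard gives this type no name of its own; it is the type the 'z'
/// printf length modifier pairs with signed conversions, and POSIX commonly
/// spells it 'ssize_t'.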
5905 CanQualType ASTContext::getSignedSizeType() const { 5906 return getFromTargetType(Target->getSignedSizeType()); 5907 } 5908 5909 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5910 CanQualType ASTContext::getIntMaxType() const { 5911 return getFromTargetType(Target->getIntMaxType()); 5912 } 5913 5914 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5915 CanQualType ASTContext::getUIntMaxType() const { 5916 return getFromTargetType(Target->getUIntMaxType()); 5917 } 5918 5919 /// getSignedWCharType - Return the type of "signed wchar_t". 5920 /// Used when in C++, as a GCC extension. 5921 QualType ASTContext::getSignedWCharType() const { 5922 // FIXME: derive from "Target" ? 5923 return WCharTy; 5924 } 5925 5926 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5927 /// Used when in C++, as a GCC extension. 5928 QualType ASTContext::getUnsignedWCharType() const { 5929 // FIXME: derive from "Target" ? 5930 return UnsignedIntTy; 5931 } 5932 5933 QualType ASTContext::getIntPtrType() const { 5934 return getFromTargetType(Target->getIntPtrType()); 5935 } 5936 5937 QualType ASTContext::getUIntPtrType() const { 5938 return getCorrespondingUnsignedType(getIntPtrType()); 5939 } 5940 5941 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5942 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 5943 QualType ASTContext::getPointerDiffType() const { 5944 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 5945 } 5946 5947 /// Return the unique unsigned counterpart of "ptrdiff_t" 5948 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5949 /// in the definition of %tu format specifier. 5950 QualType ASTContext::getUnsignedPointerDiffType() const { 5951 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 5952 } 5953 5954 /// Return the unique type for "pid_t" defined in 5955 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5956 QualType ASTContext::getProcessIDType() const { 5957 return getFromTargetType(Target->getProcessIDType()); 5958 } 5959 5960 //===----------------------------------------------------------------------===// 5961 // Type Operators 5962 //===----------------------------------------------------------------------===// 5963 5964 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5965 // Push qualifiers into arrays, and then discard any remaining 5966 // qualifiers. 5967 T = getCanonicalType(T); 5968 T = getVariableArrayDecayedType(T); 5969 const Type *Ty = T.getTypePtr(); 5970 QualType Result; 5971 if (isa<ArrayType>(Ty)) { 5972 Result = getArrayDecayedType(QualType(Ty,0)); 5973 } else if (isa<FunctionType>(Ty)) { 5974 Result = getPointerType(QualType(Ty, 0)); 5975 } else { 5976 Result = QualType(Ty, 0); 5977 } 5978 5979 return CanQualType::CreateUnsafe(Result); 5980 } 5981 5982 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5983 Qualifiers &quals) { 5984 SplitQualType splitType = type.getSplitUnqualifiedType(); 5985 5986 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5987 // the unqualified desugared type and then drops it on the floor. 5988 // We then have to strip that sugar back off with 5989 // getUnqualifiedDesugaredType(), which is silly. 5990 const auto *AT = 5991 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5992 5993 // If we don't have an array, just use the results in splitType. 
5994 if (!AT) { 5995 quals = splitType.Quals; 5996 return QualType(splitType.Ty, 0); 5997 } 5998 5999 // Otherwise, recurse on the array's element type. 6000 QualType elementType = AT->getElementType(); 6001 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 6002 6003 // If that didn't change the element type, AT has no qualifiers, so we 6004 // can just use the results in splitType. 6005 if (elementType == unqualElementType) { 6006 assert(quals.empty()); // from the recursive call 6007 quals = splitType.Quals; 6008 return QualType(splitType.Ty, 0); 6009 } 6010 6011 // Otherwise, add in the qualifiers from the outermost type, then 6012 // build the type back up. 6013 quals.addConsistentQualifiers(splitType.Quals); 6014 6015 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 6016 return getConstantArrayType(unqualElementType, CAT->getSize(), 6017 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 6018 } 6019 6020 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 6021 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 6022 } 6023 6024 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 6025 return getVariableArrayType(unqualElementType, 6026 VAT->getSizeExpr(), 6027 VAT->getSizeModifier(), 6028 VAT->getIndexTypeCVRQualifiers(), 6029 VAT->getBracketsRange()); 6030 } 6031 6032 const auto *DSAT = cast<DependentSizedArrayType>(AT); 6033 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 6034 DSAT->getSizeModifier(), 0, 6035 SourceRange()); 6036 } 6037 6038 /// Attempt to unwrap two types that may both be array types with the same bound 6039 /// (or both be array types of unknown bound) for the purpose of comparing the 6040 /// cv-decomposition of two types per C++ [conv.qual]. 6041 /// 6042 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6043 /// C++20 [conv.qual], if permitted by the current language mode. 6044 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 6045 bool AllowPiMismatch) { 6046 while (true) { 6047 auto *AT1 = getAsArrayType(T1); 6048 if (!AT1) 6049 return; 6050 6051 auto *AT2 = getAsArrayType(T2); 6052 if (!AT2) 6053 return; 6054 6055 // If we don't have two array types with the same constant bound nor two 6056 // incomplete array types, we've unwrapped everything we can. 6057 // C++20 also permits one type to be a constant array type and the other 6058 // to be an incomplete array type. 6059 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6060 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6061 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6062 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6063 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6064 isa<IncompleteArrayType>(AT2)))) 6065 return; 6066 } else if (isa<IncompleteArrayType>(AT1)) { 6067 if (!(isa<IncompleteArrayType>(AT2) || 6068 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6069 isa<ConstantArrayType>(AT2)))) 6070 return; 6071 } else { 6072 return; 6073 } 6074 6075 T1 = AT1->getElementType(); 6076 T2 = AT2->getElementType(); 6077 } 6078 } 6079 6080 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 6081 /// 6082 /// If T1 and T2 are both pointer types of the same kind, or both array types 6083 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6084 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 
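///
/// For example, given T1 = 'int **' and T2 = 'const int **', a single call
/// strips one pointer level from each, leaving 'int *' and 'const int *'
/// respectively and returning true.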
6085 /// 6086 /// This function will typically be called in a loop that successively 6087 /// "unwraps" pointer and pointer-to-member types to compare them at each 6088 /// level. 6089 /// 6090 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6091 /// C++20 [conv.qual], if permitted by the current language mode. 6092 /// 6093 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6094 /// pair of types that can't be unwrapped further. 6095 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6096 bool AllowPiMismatch) { 6097 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6098 6099 const auto *T1PtrType = T1->getAs<PointerType>(); 6100 const auto *T2PtrType = T2->getAs<PointerType>(); 6101 if (T1PtrType && T2PtrType) { 6102 T1 = T1PtrType->getPointeeType(); 6103 T2 = T2PtrType->getPointeeType(); 6104 return true; 6105 } 6106 6107 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6108 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6109 if (T1MPType && T2MPType && 6110 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6111 QualType(T2MPType->getClass(), 0))) { 6112 T1 = T1MPType->getPointeeType(); 6113 T2 = T2MPType->getPointeeType(); 6114 return true; 6115 } 6116 6117 if (getLangOpts().ObjC) { 6118 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6119 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6120 if (T1OPType && T2OPType) { 6121 T1 = T1OPType->getPointeeType(); 6122 T2 = T2OPType->getPointeeType(); 6123 return true; 6124 } 6125 } 6126 6127 // FIXME: Block pointers, too? 6128 6129 return false; 6130 } 6131 6132 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6133 while (true) { 6134 Qualifiers Quals; 6135 T1 = getUnqualifiedArrayType(T1, Quals); 6136 T2 = getUnqualifiedArrayType(T2, Quals); 6137 if (hasSameType(T1, T2)) 6138 return true; 6139 if (!UnwrapSimilarTypes(T1, T2)) 6140 return false; 6141 } 6142 } 6143 6144 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6145 while (true) { 6146 Qualifiers Quals1, Quals2; 6147 T1 = getUnqualifiedArrayType(T1, Quals1); 6148 T2 = getUnqualifiedArrayType(T2, Quals2); 6149 6150 Quals1.removeCVRQualifiers(); 6151 Quals2.removeCVRQualifiers(); 6152 if (Quals1 != Quals2) 6153 return false; 6154 6155 if (hasSameType(T1, T2)) 6156 return true; 6157 6158 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6159 return false; 6160 } 6161 } 6162 6163 DeclarationNameInfo 6164 ASTContext::getNameForTemplate(TemplateName Name, 6165 SourceLocation NameLoc) const { 6166 switch (Name.getKind()) { 6167 case TemplateName::QualifiedTemplate: 6168 case TemplateName::Template: 6169 // DNInfo work in progress: CHECKME: what about DNLoc? 6170 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6171 NameLoc); 6172 6173 case TemplateName::OverloadedTemplate: { 6174 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6175 // DNInfo work in progress: CHECKME: what about DNLoc? 
6176 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6177 } 6178 6179 case TemplateName::AssumedTemplate: { 6180 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6181 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6182 } 6183 6184 case TemplateName::DependentTemplate: { 6185 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6186 DeclarationName DName; 6187 if (DTN->isIdentifier()) { 6188 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6189 return DeclarationNameInfo(DName, NameLoc); 6190 } else { 6191 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6192 // DNInfo work in progress: FIXME: source locations? 6193 DeclarationNameLoc DNLoc = 6194 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6195 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6196 } 6197 } 6198 6199 case TemplateName::SubstTemplateTemplateParm: { 6200 SubstTemplateTemplateParmStorage *subst 6201 = Name.getAsSubstTemplateTemplateParm(); 6202 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6203 NameLoc); 6204 } 6205 6206 case TemplateName::SubstTemplateTemplateParmPack: { 6207 SubstTemplateTemplateParmPackStorage *subst 6208 = Name.getAsSubstTemplateTemplateParmPack(); 6209 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6210 NameLoc); 6211 } 6212 case TemplateName::UsingTemplate: 6213 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6214 NameLoc); 6215 } 6216 6217 llvm_unreachable("bad template name kind!"); 6218 } 6219 6220 TemplateName 6221 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6222 switch (Name.getKind()) { 6223 case TemplateName::UsingTemplate: 6224 case TemplateName::QualifiedTemplate: 6225 case TemplateName::Template: { 6226 TemplateDecl *Template = Name.getAsTemplateDecl(); 6227 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6228 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6229 6230 // The canonical template name is the canonical template declaration. 
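    // For instance, a template named through a using shadow or written with a
    // qualifier such as 'N::Tmpl' canonicalizes to the underlying canonical
    // TemplateDecl; the qualifier and the shadow are not part of the
    // canonical name.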
6231 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6232 } 6233 6234 case TemplateName::OverloadedTemplate: 6235 case TemplateName::AssumedTemplate: 6236 llvm_unreachable("cannot canonicalize unresolved template"); 6237 6238 case TemplateName::DependentTemplate: { 6239 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6240 assert(DTN && "Non-dependent template names must refer to template decls."); 6241 return DTN->CanonicalTemplateName; 6242 } 6243 6244 case TemplateName::SubstTemplateTemplateParm: { 6245 SubstTemplateTemplateParmStorage *subst 6246 = Name.getAsSubstTemplateTemplateParm(); 6247 return getCanonicalTemplateName(subst->getReplacement()); 6248 } 6249 6250 case TemplateName::SubstTemplateTemplateParmPack: { 6251 SubstTemplateTemplateParmPackStorage *subst = 6252 Name.getAsSubstTemplateTemplateParmPack(); 6253 TemplateArgument canonArgPack = 6254 getCanonicalTemplateArgument(subst->getArgumentPack()); 6255 return getSubstTemplateTemplateParmPack( 6256 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(), 6257 subst->getFinal(), subst->getIndex()); 6258 } 6259 } 6260 6261 llvm_unreachable("bad template name!"); 6262 } 6263 6264 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6265 const TemplateName &Y) const { 6266 return getCanonicalTemplateName(X).getAsVoidPointer() == 6267 getCanonicalTemplateName(Y).getAsVoidPointer(); 6268 } 6269 6270 bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { 6271 if (!XCE != !YCE) 6272 return false; 6273 6274 if (!XCE) 6275 return true; 6276 6277 llvm::FoldingSetNodeID XCEID, YCEID; 6278 XCE->Profile(XCEID, *this, /*Canonical=*/true); 6279 YCE->Profile(YCEID, *this, /*Canonical=*/true); 6280 return XCEID == YCEID; 6281 } 6282 6283 bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, 6284 const TypeConstraint *YTC) const { 6285 if (!XTC != !YTC) 6286 return false; 6287 6288 if (!XTC) 6289 return true; 6290 6291 auto *NCX = XTC->getNamedConcept(); 6292 auto *NCY = YTC->getNamedConcept(); 6293 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6294 return false; 6295 if (XTC->hasExplicitTemplateArgs() != YTC->hasExplicitTemplateArgs()) 6296 return false; 6297 if (XTC->hasExplicitTemplateArgs()) 6298 if (XTC->getTemplateArgsAsWritten()->NumTemplateArgs != 6299 YTC->getTemplateArgsAsWritten()->NumTemplateArgs) 6300 return false; 6301 6302 // Compare slowly by profiling. 6303 // 6304 // We couldn't compare the profiling result for the template 6305 // args here. Consider the following example in different modules: 6306 // 6307 // template <__integer_like _Tp, C<_Tp> Sentinel> 6308 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const { 6309 // return __t; 6310 // } 6311 // 6312 // When we compare the profiling result for `C<_Tp>` in different 6313 // modules, it will compare the type of `_Tp` in different modules. 6314 // However, the type of `_Tp` in different modules refer to different 6315 // types here naturally. So we couldn't compare the profiling result 6316 // for the template args directly. 
6317 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), 6318 YTC->getImmediatelyDeclaredConstraint()); 6319 } 6320 6321 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6322 const NamedDecl *Y) const { 6323 if (X->getKind() != Y->getKind()) 6324 return false; 6325 6326 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6327 auto *TY = cast<TemplateTypeParmDecl>(Y); 6328 if (TX->isParameterPack() != TY->isParameterPack()) 6329 return false; 6330 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6331 return false; 6332 return isSameTypeConstraint(TX->getTypeConstraint(), 6333 TY->getTypeConstraint()); 6334 } 6335 6336 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6337 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6338 return TX->isParameterPack() == TY->isParameterPack() && 6339 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) && 6340 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(), 6341 TY->getPlaceholderTypeConstraint()); 6342 } 6343 6344 auto *TX = cast<TemplateTemplateParmDecl>(X); 6345 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6346 return TX->isParameterPack() == TY->isParameterPack() && 6347 isSameTemplateParameterList(TX->getTemplateParameters(), 6348 TY->getTemplateParameters()); 6349 } 6350 6351 bool ASTContext::isSameTemplateParameterList( 6352 const TemplateParameterList *X, const TemplateParameterList *Y) const { 6353 if (X->size() != Y->size()) 6354 return false; 6355 6356 for (unsigned I = 0, N = X->size(); I != N; ++I) 6357 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6358 return false; 6359 6360 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); 6361 } 6362 6363 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, 6364 const NamedDecl *Y) const { 6365 // If the type parameter isn't the same already, we don't need to check the 6366 // default argument further. 
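  // For example, 'template <typename T = int>' declared in two modules has
  // the same default argument, whereas 'typename T = int' vs.
  // 'typename T = long' does not.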
6367 if (!isSameTemplateParameter(X, Y)) 6368 return false; 6369 6370 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { 6371 auto *TTPY = cast<TemplateTypeParmDecl>(Y); 6372 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6373 return false; 6374 6375 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); 6376 } 6377 6378 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6379 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); 6380 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) 6381 return false; 6382 6383 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); 6384 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); 6385 llvm::FoldingSetNodeID XID, YID; 6386 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); 6387 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); 6388 return XID == YID; 6389 } 6390 6391 auto *TTPX = cast<TemplateTemplateParmDecl>(X); 6392 auto *TTPY = cast<TemplateTemplateParmDecl>(Y); 6393 6394 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6395 return false; 6396 6397 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); 6398 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); 6399 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); 6400 } 6401 6402 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6403 if (auto *NS = X->getAsNamespace()) 6404 return NS; 6405 if (auto *NAS = X->getAsNamespaceAlias()) 6406 return NAS->getNamespace(); 6407 return nullptr; 6408 } 6409 6410 static bool isSameQualifier(const NestedNameSpecifier *X, 6411 const NestedNameSpecifier *Y) { 6412 if (auto *NSX = getNamespace(X)) { 6413 auto *NSY = getNamespace(Y); 6414 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6415 return false; 6416 } else if (X->getKind() != Y->getKind()) 6417 return false; 6418 6419 // FIXME: For namespaces and types, we're permitted to check that the entity 6420 // is named via the same tokens. We should probably do so. 6421 switch (X->getKind()) { 6422 case NestedNameSpecifier::Identifier: 6423 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6424 return false; 6425 break; 6426 case NestedNameSpecifier::Namespace: 6427 case NestedNameSpecifier::NamespaceAlias: 6428 // We've already checked that we named the same namespace. 6429 break; 6430 case NestedNameSpecifier::TypeSpec: 6431 case NestedNameSpecifier::TypeSpecWithTemplate: 6432 if (X->getAsType()->getCanonicalTypeInternal() != 6433 Y->getAsType()->getCanonicalTypeInternal()) 6434 return false; 6435 break; 6436 case NestedNameSpecifier::Global: 6437 case NestedNameSpecifier::Super: 6438 return true; 6439 } 6440 6441 // Recurse into earlier portion of NNS, if any. 6442 auto *PX = X->getPrefix(); 6443 auto *PY = Y->getPrefix(); 6444 if (PX && PY) 6445 return isSameQualifier(PX, PY); 6446 return !PX && !PY; 6447 } 6448 6449 /// Determine whether the attributes we can overload on are identical for A and 6450 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6451 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6452 const FunctionDecl *B) { 6453 // Note that pass_object_size attributes are represented in the function's 6454 // ExtParameterInfo, so we don't need to check them here. 
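  // What remains to compare is enable_if. Two declarations such as
  //   void f(int x) __attribute__((enable_if(x > 0, "positive only")));
  // only match if their enable_if conditions appear in the same order and
  // profile identically.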

  llvm::FoldingSetNodeID Cand1ID, Cand2ID;
  auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
  auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();

  for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
    std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
    std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);

    // Return false if the number of enable_if attributes is different.
    if (!Cand1A || !Cand2A)
      return false;

    Cand1ID.clear();
    Cand2ID.clear();

    (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
    (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);

    // Return false if any of the enable_if expressions of A and B are
    // different.
    if (Cand1ID != Cand2ID)
      return false;
  }
  return true;
}

bool ASTContext::FriendsDifferByConstraints(const FunctionDecl *X,
                                            const FunctionDecl *Y) const {
  // If these aren't friends, then they aren't friends that differ by
  // constraints.
  if (!X->getFriendObjectKind() || !Y->getFriendObjectKind())
    return false;

  // If the two functions share lexical declaration context, they are not in
  // separate instantiations, and thus in the same scope.
  if (X->getLexicalDeclContext() == Y->getLexicalDeclContext())
    return false;

  if (!X->getDescribedFunctionTemplate()) {
    assert(!Y->getDescribedFunctionTemplate() &&
           "How would these be the same if they aren't both templates?");

    // If these friends don't have constraints, they aren't constrained, and
    // thus don't fall under temp.friend p9. Else the simple presence of a
    // constraint makes them unique.
    return X->getTrailingRequiresClause();
  }

  return X->FriendConstraintRefersToEnclosingTemplate();
}

bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
  if (X == Y)
    return true;

  if (X->getDeclName() != Y->getDeclName())
    return false;

  // Must be in the same context.
  //
  // Note that we can't use DeclContext::Equals here, because the DeclContexts
  // could be two different declarations of the same function. (We will fix the
  // semantic DC to refer to the primary definition after merging.)
  if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
                          cast<Decl>(Y->getDeclContext()->getRedeclContext())))
    return false;

  // Two typedefs refer to the same entity if they have the same underlying
  // type.
  if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
    if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
      return hasSameType(TypedefX->getUnderlyingType(),
                         TypedefY->getUnderlyingType());

  // Must have the same kind.
  if (X->getKind() != Y->getKind())
    return false;

  // Objective-C classes and protocols with the same name always match.
  if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
    return true;

  if (isa<ClassTemplateSpecializationDecl>(X)) {
    // No need to handle these here: we merge them when adding them to the
    // template.
    return false;
  }

  // Compatible tags match.
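  // The tag keys 'struct', 'class', and '__interface' are interchangeable
  // here, so 'struct S;' in one module matches 'class S { ... };' in another,
  // while 'enum S' and 'union S' never match a 'struct S'.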
  if (const auto *TagX = dyn_cast<TagDecl>(X)) {
    const auto *TagY = cast<TagDecl>(Y);
    return (TagX->getTagKind() == TagY->getTagKind()) ||
           ((TagX->getTagKind() == TTK_Struct ||
             TagX->getTagKind() == TTK_Class ||
             TagX->getTagKind() == TTK_Interface) &&
            (TagY->getTagKind() == TTK_Struct ||
             TagY->getTagKind() == TTK_Class ||
             TagY->getTagKind() == TTK_Interface));
  }

  // Functions with the same type and linkage match.
  // FIXME: This needs to cope with merging of prototyped/non-prototyped
  // functions, etc.
  if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
    const auto *FuncY = cast<FunctionDecl>(Y);
    if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
      const auto *CtorY = cast<CXXConstructorDecl>(Y);
      if (CtorX->getInheritedConstructor() &&
          !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
                        CtorY->getInheritedConstructor().getConstructor()))
        return false;
    }

    if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
      return false;

    // Multiversioned functions with different feature strings are represented
    // as separate declarations.
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
                              FuncY->getTrailingRequiresClause()))
      return false;

    // Constrained friends are different in certain cases, see: [temp.friend]p9.
    if (FriendsDifferByConstraints(FuncX, FuncY))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(XT, YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(FuncX, FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(X)) {
    const auto *VarY = cast<VarDecl>(Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      if (hasSameType(VarX->getType(), VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. Eg.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)];       // #2
      // This only happens when completing an incomplete array type. In that
      // case, when comparing #1 and #2, we should compare their element types.
      const ArrayType *VarXTy = getAsArrayType(VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
    const auto *TemplateY = cast<TemplateDecl>(Y);

    // ConceptDecls are not the same if their constraint expressions differ.
    if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
      const auto *ConceptY = cast<ConceptDecl>(Y);
      const Expr *XCE = ConceptX->getConstraintExpr();
      const Expr *YCE = ConceptY->getConstraintExpr();
      assert(XCE && YCE && "ConceptDecl without constraint expression?");
      llvm::FoldingSetNodeID XID, YID;
      XCE->Profile(XID, *this, /*Canonical=*/true);
      YCE->Profile(YID, *this, /*Canonical=*/true);
      if (XID != YID)
        return false;
    }

    return isSameEntity(TemplateX->getTemplatedDecl(),
                        TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(TemplateX->getTemplateParameters(),
                                       TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
    const auto *FDY = cast<FieldDecl>(Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(FDX->getType(), FDY->getType());
  }

  // Indirect fields with the same target field match.
  if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
    const auto *IFDY = cast<IndirectFieldDecl>(Y);
    return IFDX->getAnonField()->getCanonicalDecl() ==
           IFDY->getAnonField()->getCanonicalDecl();
  }

  // Enumerators with the same name match.
  if (isa<EnumConstantDecl>(X))
    // FIXME: Also check the value is odr-equivalent.
    return true;

  // Using shadow declarations with the same target match.
  if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
    const auto *USY = cast<UsingShadowDecl>(Y);
    return USX->getTargetDecl() == USY->getTargetDecl();
  }

  // Using declarations with the same qualifier match. (We already know that
  // the name matches.)
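  // For example, two 'using N::f;' declarations match when their qualifiers
  // name the same namespace N and they agree on 'typename' and on
  // access-declaration syntax.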
6698 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6699 const auto *UY = cast<UsingDecl>(Y); 6700 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6701 UX->hasTypename() == UY->hasTypename() && 6702 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6703 } 6704 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6705 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6706 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6707 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6708 } 6709 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6710 return isSameQualifier( 6711 UX->getQualifier(), 6712 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6713 } 6714 6715 // Using-pack declarations are only created by instantiation, and match if 6716 // they're instantiated from matching UnresolvedUsing...Decls. 6717 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6718 return declaresSameEntity( 6719 UX->getInstantiatedFromUsingDecl(), 6720 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6721 } 6722 6723 // Namespace alias definitions with the same target match. 6724 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) { 6725 const auto *NAY = cast<NamespaceAliasDecl>(Y); 6726 return NAX->getNamespace()->Equals(NAY->getNamespace()); 6727 } 6728 6729 return false; 6730 } 6731 6732 TemplateArgument 6733 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6734 switch (Arg.getKind()) { 6735 case TemplateArgument::Null: 6736 return Arg; 6737 6738 case TemplateArgument::Expression: 6739 return Arg; 6740 6741 case TemplateArgument::Declaration: { 6742 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6743 return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl())); 6744 } 6745 6746 case TemplateArgument::NullPtr: 6747 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6748 /*isNullPtr*/true); 6749 6750 case TemplateArgument::Template: 6751 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); 6752 6753 case TemplateArgument::TemplateExpansion: 6754 return TemplateArgument(getCanonicalTemplateName( 6755 Arg.getAsTemplateOrTemplatePattern()), 6756 Arg.getNumTemplateExpansions()); 6757 6758 case TemplateArgument::Integral: 6759 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6760 6761 case TemplateArgument::Type: 6762 return TemplateArgument(getCanonicalType(Arg.getAsType())); 6763 6764 case TemplateArgument::Pack: { 6765 bool AnyNonCanonArgs = false; 6766 auto CanonArgs = ::getCanonicalTemplateArguments( 6767 *this, Arg.pack_elements(), AnyNonCanonArgs); 6768 if (!AnyNonCanonArgs) 6769 return Arg; 6770 return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), 6771 CanonArgs); 6772 } 6773 } 6774 6775 // Silence GCC warning 6776 llvm_unreachable("Unhandled template argument kind"); 6777 } 6778 6779 NestedNameSpecifier * 6780 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6781 if (!NNS) 6782 return nullptr; 6783 6784 switch (NNS->getKind()) { 6785 case NestedNameSpecifier::Identifier: 6786 // Canonicalize the prefix but keep the identifier the same. 6787 return NestedNameSpecifier::Create(*this, 6788 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6789 NNS->getAsIdentifier()); 6790 6791 case NestedNameSpecifier::Namespace: 6792 // A namespace is canonical; build a nested-name-specifier with 6793 // this namespace and no prefix. 
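    // For example, 'A::B::' where B is a namespace canonicalizes to 'B::';
    // the enclosing prefix 'A::' is dropped because the namespace alone
    // uniquely identifies the entity.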
    return NestedNameSpecifier::Create(*this, nullptr,
                                 NNS->getAsNamespace()->getOriginalNamespace());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                                    NNS->getAsNamespaceAlias()->getNamespace()
                                                      ->getOriginalNamespace());

  // The difference between TypeSpec and TypeSpecWithTemplate is that the
  // latter will have the 'template' keyword when printed.
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *T = getCanonicalType(NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(
          *this, DNT->getQualifier(),
          const_cast<IdentifierInfo *>(DNT->getIdentifier()));
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
      return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
                                         const_cast<Type *>(T));

    // TODO: Set 'Template' parameter to true for other template types.
    return NestedNameSpecifier::Create(*this, nullptr, false,
                                       const_cast<Type *>(T));
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}

const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array
  // type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
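  // For example, given 'typedef int A[5];', the type 'const A' is rebuilt
  // here as an array of five 'const int' elements.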
6869 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6870 6871 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6872 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6873 CAT->getSizeExpr(), 6874 CAT->getSizeModifier(), 6875 CAT->getIndexTypeCVRQualifiers())); 6876 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6877 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6878 IAT->getSizeModifier(), 6879 IAT->getIndexTypeCVRQualifiers())); 6880 6881 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6882 return cast<ArrayType>( 6883 getDependentSizedArrayType(NewEltTy, 6884 DSAT->getSizeExpr(), 6885 DSAT->getSizeModifier(), 6886 DSAT->getIndexTypeCVRQualifiers(), 6887 DSAT->getBracketsRange())); 6888 6889 const auto *VAT = cast<VariableArrayType>(ATy); 6890 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6891 VAT->getSizeExpr(), 6892 VAT->getSizeModifier(), 6893 VAT->getIndexTypeCVRQualifiers(), 6894 VAT->getBracketsRange())); 6895 } 6896 6897 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6898 if (T->isArrayType() || T->isFunctionType()) 6899 return getDecayedType(T); 6900 return T; 6901 } 6902 6903 QualType ASTContext::getSignatureParameterType(QualType T) const { 6904 T = getVariableArrayDecayedType(T); 6905 T = getAdjustedParameterType(T); 6906 return T.getUnqualifiedType(); 6907 } 6908 6909 QualType ASTContext::getExceptionObjectType(QualType T) const { 6910 // C++ [except.throw]p3: 6911 // A throw-expression initializes a temporary object, called the exception 6912 // object, the type of which is determined by removing any top-level 6913 // cv-qualifiers from the static type of the operand of throw and adjusting 6914 // the type from "array of T" or "function returning T" to "pointer to T" 6915 // or "pointer to function returning T", [...] 6916 T = getVariableArrayDecayedType(T); 6917 if (T->isArrayType() || T->isFunctionType()) 6918 T = getDecayedType(T); 6919 return T.getUnqualifiedType(); 6920 } 6921 6922 /// getArrayDecayedType - Return the properly qualified result of decaying the 6923 /// specified array type to a pointer. This operation is non-trivial when 6924 /// handling typedefs etc. The canonical type of "T" must be an array type, 6925 /// this returns a pointer to a properly qualified element of the array. 6926 /// 6927 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6928 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6929 // Get the element type with 'getAsArrayType' so that we don't lose any 6930 // typedefs in the element type of the array. This also handles propagation 6931 // of type qualifiers from the array type into the element type if present 6932 // (C99 6.7.3p8). 
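  // For example, 'typedef int A[4]; const A x;' decays to 'const int *': the
  // const is pushed onto the element type before the pointer is formed.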
6933 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6934 assert(PrettyArrayType && "Not an array type!"); 6935 6936 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6937 6938 // int x[restrict 4] -> int *restrict 6939 QualType Result = getQualifiedType(PtrTy, 6940 PrettyArrayType->getIndexTypeQualifiers()); 6941 6942 // int x[_Nullable] -> int * _Nullable 6943 if (auto Nullability = Ty->getNullability()) { 6944 Result = const_cast<ASTContext *>(this)->getAttributedType( 6945 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6946 } 6947 return Result; 6948 } 6949 6950 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6951 return getBaseElementType(array->getElementType()); 6952 } 6953 6954 QualType ASTContext::getBaseElementType(QualType type) const { 6955 Qualifiers qs; 6956 while (true) { 6957 SplitQualType split = type.getSplitDesugaredType(); 6958 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6959 if (!array) break; 6960 6961 type = array->getElementType(); 6962 qs.addConsistentQualifiers(split.Quals); 6963 } 6964 6965 return getQualifiedType(type, qs); 6966 } 6967 6968 /// getConstantArrayElementCount - Returns number of constant array elements. 6969 uint64_t 6970 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6971 uint64_t ElementCount = 1; 6972 do { 6973 ElementCount *= CA->getSize().getZExtValue(); 6974 CA = dyn_cast_or_null<ConstantArrayType>( 6975 CA->getElementType()->getAsArrayTypeUnsafe()); 6976 } while (CA); 6977 return ElementCount; 6978 } 6979 6980 uint64_t ASTContext::getArrayInitLoopExprElementCount( 6981 const ArrayInitLoopExpr *AILE) const { 6982 if (!AILE) 6983 return 0; 6984 6985 uint64_t ElementCount = 1; 6986 6987 do { 6988 ElementCount *= AILE->getArraySize().getZExtValue(); 6989 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr()); 6990 } while (AILE); 6991 6992 return ElementCount; 6993 } 6994 6995 /// getFloatingRank - Return a relative rank for floating point types. 6996 /// This routine will assert if passed a built-in type that isn't a float. 6997 static FloatingRank getFloatingRank(QualType T) { 6998 if (const auto *CT = T->getAs<ComplexType>()) 6999 return getFloatingRank(CT->getElementType()); 7000 7001 switch (T->castAs<BuiltinType>()->getKind()) { 7002 default: llvm_unreachable("getFloatingRank(): not a floating type"); 7003 case BuiltinType::Float16: return Float16Rank; 7004 case BuiltinType::Half: return HalfRank; 7005 case BuiltinType::Float: return FloatRank; 7006 case BuiltinType::Double: return DoubleRank; 7007 case BuiltinType::LongDouble: return LongDoubleRank; 7008 case BuiltinType::Float128: return Float128Rank; 7009 case BuiltinType::BFloat16: return BFloat16Rank; 7010 case BuiltinType::Ibm128: return Ibm128Rank; 7011 } 7012 } 7013 7014 /// getFloatingTypeOrder - Compare the rank of the two specified floating 7015 /// point types, ignoring the domain of the type (i.e. 'double' == 7016 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 7017 /// LHS < RHS, return -1. 
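/// For example, getFloatingTypeOrder(DoubleTy, FloatTy) returns 1, and
/// comparing 'float' with '_Complex float' returns 0 because the complex
/// domain is ignored.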
7018 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 7019 FloatingRank LHSR = getFloatingRank(LHS); 7020 FloatingRank RHSR = getFloatingRank(RHS); 7021 7022 if (LHSR == RHSR) 7023 return 0; 7024 if (LHSR > RHSR) 7025 return 1; 7026 return -1; 7027 } 7028 7029 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 7030 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 7031 return 0; 7032 return getFloatingTypeOrder(LHS, RHS); 7033 } 7034 7035 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 7036 /// routine will assert if passed a built-in type that isn't an integer or enum, 7037 /// or if it is not canonicalized. 7038 unsigned ASTContext::getIntegerRank(const Type *T) const { 7039 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 7040 7041 // Results in this 'losing' to any type of the same size, but winning if 7042 // larger. 7043 if (const auto *EIT = dyn_cast<BitIntType>(T)) 7044 return 0 + (EIT->getNumBits() << 3); 7045 7046 switch (cast<BuiltinType>(T)->getKind()) { 7047 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 7048 case BuiltinType::Bool: 7049 return 1 + (getIntWidth(BoolTy) << 3); 7050 case BuiltinType::Char_S: 7051 case BuiltinType::Char_U: 7052 case BuiltinType::SChar: 7053 case BuiltinType::UChar: 7054 return 2 + (getIntWidth(CharTy) << 3); 7055 case BuiltinType::Short: 7056 case BuiltinType::UShort: 7057 return 3 + (getIntWidth(ShortTy) << 3); 7058 case BuiltinType::Int: 7059 case BuiltinType::UInt: 7060 return 4 + (getIntWidth(IntTy) << 3); 7061 case BuiltinType::Long: 7062 case BuiltinType::ULong: 7063 return 5 + (getIntWidth(LongTy) << 3); 7064 case BuiltinType::LongLong: 7065 case BuiltinType::ULongLong: 7066 return 6 + (getIntWidth(LongLongTy) << 3); 7067 case BuiltinType::Int128: 7068 case BuiltinType::UInt128: 7069 return 7 + (getIntWidth(Int128Ty) << 3); 7070 7071 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of 7072 // their underlying types" [c++20 conv.rank] 7073 case BuiltinType::Char8: 7074 return getIntegerRank(UnsignedCharTy.getTypePtr()); 7075 case BuiltinType::Char16: 7076 return getIntegerRank( 7077 getFromTargetType(Target->getChar16Type()).getTypePtr()); 7078 case BuiltinType::Char32: 7079 return getIntegerRank( 7080 getFromTargetType(Target->getChar32Type()).getTypePtr()); 7081 case BuiltinType::WChar_S: 7082 case BuiltinType::WChar_U: 7083 return getIntegerRank( 7084 getFromTargetType(Target->getWCharType()).getTypePtr()); 7085 } 7086 } 7087 7088 /// Whether this is a promotable bitfield reference according 7089 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 7090 /// 7091 /// \returns the type this bit-field will promote to, or NULL if no 7092 /// promotion occurs. 7093 QualType ASTContext::isPromotableBitField(Expr *E) const { 7094 if (E->isTypeDependent() || E->isValueDependent()) 7095 return {}; 7096 7097 // C++ [conv.prom]p5: 7098 // If the bit-field has an enumerated type, it is treated as any other 7099 // value of that type for promotion purposes. 7100 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 7101 return {}; 7102 7103 // FIXME: We should not do this unless E->refersToBitField() is true. This 7104 // matters in C where getSourceBitField() will find bit-fields for various 7105 // cases where the source expression is not a bit-field designator. 7106 7107 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 
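  // For example, given 'struct S { unsigned Bits : 3; } s;', reading 's.Bits'
  // is a promotable bit-field reference and the promoted type computed below
  // is 'int'.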
7108 if (!Field) 7109 return {}; 7110 7111 QualType FT = Field->getType(); 7112 7113 uint64_t BitWidth = Field->getBitWidthValue(*this); 7114 uint64_t IntSize = getTypeSize(IntTy); 7115 // C++ [conv.prom]p5: 7116 // A prvalue for an integral bit-field can be converted to a prvalue of type 7117 // int if int can represent all the values of the bit-field; otherwise, it 7118 // can be converted to unsigned int if unsigned int can represent all the 7119 // values of the bit-field. If the bit-field is larger yet, no integral 7120 // promotion applies to it. 7121 // C11 6.3.1.1/2: 7122 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 7123 // If an int can represent all values of the original type (as restricted by 7124 // the width, for a bit-field), the value is converted to an int; otherwise, 7125 // it is converted to an unsigned int. 7126 // 7127 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 7128 // We perform that promotion here to match GCC and C++. 7129 // FIXME: C does not permit promotion of an enum bit-field whose rank is 7130 // greater than that of 'int'. We perform that promotion to match GCC. 7131 if (BitWidth < IntSize) 7132 return IntTy; 7133 7134 if (BitWidth == IntSize) 7135 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 7136 7137 // Bit-fields wider than int are not subject to promotions, and therefore act 7138 // like the base type. GCC has some weird bugs in this area that we 7139 // deliberately do not follow (GCC follows a pre-standard resolution to 7140 // C's DR315 which treats bit-width as being part of the type, and this leaks 7141 // into their semantics in some cases). 7142 return {}; 7143 } 7144 7145 /// getPromotedIntegerType - Returns the type that Promotable will 7146 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 7147 /// integer type. 7148 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7149 assert(!Promotable.isNull()); 7150 assert(isPromotableIntegerType(Promotable)); 7151 if (const auto *ET = Promotable->getAs<EnumType>()) 7152 return ET->getDecl()->getPromotionType(); 7153 7154 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7155 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7156 // (3.9.1) can be converted to a prvalue of the first of the following 7157 // types that can represent all the values of its underlying type: 7158 // int, unsigned int, long int, unsigned long int, long long int, or 7159 // unsigned long long int [...] 7160 // FIXME: Is there some better way to compute this? 7161 if (BT->getKind() == BuiltinType::WChar_S || 7162 BT->getKind() == BuiltinType::WChar_U || 7163 BT->getKind() == BuiltinType::Char8 || 7164 BT->getKind() == BuiltinType::Char16 || 7165 BT->getKind() == BuiltinType::Char32) { 7166 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7167 uint64_t FromSize = getTypeSize(BT); 7168 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7169 LongLongTy, UnsignedLongLongTy }; 7170 for (const auto &PT : PromoteTypes) { 7171 uint64_t ToSize = getTypeSize(PT); 7172 if (FromSize < ToSize || 7173 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) 7174 return PT; 7175 } 7176 llvm_unreachable("char type should fit into long long"); 7177 } 7178 } 7179 7180 // At this point, we should have a signed or unsigned integer type. 
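  // For example, 'short' always promotes to 'int'; 'unsigned short' promotes
  // to 'int' when int is strictly wider, and to 'unsigned int' when both have
  // the same width.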
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(Promotable);
  uint64_t IntSize = getIntWidth(IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}

/// Recurses in pointer/array types until it finds an objc retainable
/// type and returns its ownership.
Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
  while (!T.isNull()) {
    if (T.getObjCLifetime() != Qualifiers::OCL_None)
      return T.getObjCLifetime();
    if (T->isArrayType())
      T = getBaseElementType(T);
    else if (const auto *PT = T->getAs<PointerType>())
      T = PT->getPointeeType();
    else if (const auto *RT = T->getAs<ReferenceType>())
      T = RT->getPointeeType();
    else
      break;
  }

  return Qualifiers::OCL_None;
}

static const Type *getIntegerTypeForEnum(const EnumType *ET) {
  // Incomplete enum types are not treated as integer types.
  // FIXME: In C++, enum types are never integer types.
  if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
    return ET->getDecl()->getIntegerType().getTypePtr();
  return nullptr;
}

/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  if (const auto *ET = dyn_cast<EnumType>(LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(LHSC);
  unsigned RHSRank = getIntegerRank(RHSC);

  if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins. Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins. Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
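  // For example, on a target where 'long' is 64 bits and 'unsigned int' is
  // 32 bits, the signed 'long' LHS has the greater rank and can represent
  // every 'unsigned int' value, so it wins here.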
  return 1;
}

TypedefDecl *ASTContext::getCFConstantStringDecl() const {
  if (CFConstantStringTypeDecl)
    return CFConstantStringTypeDecl;

  assert(!CFConstantStringTagDecl &&
         "tag and typedef should be initialized together");
  CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
  CFConstantStringTagDecl->startDefinition();

  struct {
    QualType Type;
    const char *Name;
  } Fields[5];
  unsigned Count = 0;

  /// Objective-C ABI
  ///
  ///    typedef struct __NSConstantString_tag {
  ///      const int *isa;
  ///      int flags;
  ///      const char *str;
  ///      long length;
  ///    } __NSConstantString;
  ///
  /// Swift ABI (4.1, 4.2)
  ///
  ///    typedef struct __NSConstantString_tag {
  ///      uintptr_t _cfisa;
  ///      uintptr_t _swift_rc;
  ///      _Atomic(uint64_t) _cfinfoa;
  ///      const char *_ptr;
  ///      uint32_t _length;
  ///    } __NSConstantString;
  ///
  /// Swift ABI (5.0)
  ///
  ///    typedef struct __NSConstantString_tag {
  ///      uintptr_t _cfisa;
  ///      uintptr_t _swift_rc;
  ///      _Atomic(uint64_t) _cfinfoa;
  ///      const char *_ptr;
  ///      uintptr_t _length;
  ///    } __NSConstantString;

  const auto CFRuntime = getLangOpts().CFRuntime;
  if (static_cast<unsigned>(CFRuntime) <
      static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
    Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
    Fields[Count++] = { IntTy, "flags" };
    Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
    Fields[Count++] = { LongTy, "length" };
  } else {
    Fields[Count++] = { getUIntPtrType(), "_cfisa" };
    Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
    Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" };
    Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
    if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
        CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
      Fields[Count++] = { IntTy, "_length" };
    else
      Fields[Count++] = { getUIntPtrType(), "_length" };
  }

  // Create fields
  for (unsigned i = 0; i < Count; ++i) {
    FieldDecl *Field =
        FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
                          SourceLocation(), &Idents.get(Fields[i].Name),
                          Fields[i].Type, /*TInfo=*/nullptr,
                          /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    CFConstantStringTagDecl->addDecl(Field);
  }

  CFConstantStringTagDecl->completeDefinition();
  // This type is designed to be compatible with NSConstantString, but cannot
  // use the same name, since NSConstantString is an interface.
  auto tagType = getTagDeclType(CFConstantStringTagDecl);
  CFConstantStringTypeDecl =
      buildImplicitTypedef(tagType, "__NSConstantString");

  return CFConstantStringTypeDecl;
}

RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
  if (!CFConstantStringTagDecl)
    getCFConstantStringDecl(); // Build the tag and the typedef.
  return CFConstantStringTagDecl;
}

// getCFConstantStringType - Return the type used for constant CFStrings.
7355 QualType ASTContext::getCFConstantStringType() const { 7356 return getTypedefType(getCFConstantStringDecl()); 7357 } 7358 7359 QualType ASTContext::getObjCSuperType() const { 7360 if (ObjCSuperType.isNull()) { 7361 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7362 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7363 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7364 } 7365 return ObjCSuperType; 7366 } 7367 7368 void ASTContext::setCFConstantStringType(QualType T) { 7369 const auto *TD = T->castAs<TypedefType>(); 7370 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7371 const auto *TagType = 7372 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7373 CFConstantStringTagDecl = TagType->getDecl(); 7374 } 7375 7376 QualType ASTContext::getBlockDescriptorType() const { 7377 if (BlockDescriptorType) 7378 return getTagDeclType(BlockDescriptorType); 7379 7380 RecordDecl *RD; 7381 // FIXME: Needs the FlagAppleBlock bit. 7382 RD = buildImplicitRecord("__block_descriptor"); 7383 RD->startDefinition(); 7384 7385 QualType FieldTypes[] = { 7386 UnsignedLongTy, 7387 UnsignedLongTy, 7388 }; 7389 7390 static const char *const FieldNames[] = { 7391 "reserved", 7392 "Size" 7393 }; 7394 7395 for (size_t i = 0; i < 2; ++i) { 7396 FieldDecl *Field = FieldDecl::Create( 7397 *this, RD, SourceLocation(), SourceLocation(), 7398 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7399 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7400 Field->setAccess(AS_public); 7401 RD->addDecl(Field); 7402 } 7403 7404 RD->completeDefinition(); 7405 7406 BlockDescriptorType = RD; 7407 7408 return getTagDeclType(BlockDescriptorType); 7409 } 7410 7411 QualType ASTContext::getBlockDescriptorExtendedType() const { 7412 if (BlockDescriptorExtendedType) 7413 return getTagDeclType(BlockDescriptorExtendedType); 7414 7415 RecordDecl *RD; 7416 // FIXME: Needs the FlagAppleBlock bit. 
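  // The record built below is equivalent to:
  //   struct __block_descriptor_withcopydispose {
  //     unsigned long reserved;
  //     unsigned long Size;
  //     void **CopyFuncPtr;
  //     void **DestroyFuncPtr;
  //   };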
7417 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7418 RD->startDefinition(); 7419 7420 QualType FieldTypes[] = { 7421 UnsignedLongTy, 7422 UnsignedLongTy, 7423 getPointerType(VoidPtrTy), 7424 getPointerType(VoidPtrTy) 7425 }; 7426 7427 static const char *const FieldNames[] = { 7428 "reserved", 7429 "Size", 7430 "CopyFuncPtr", 7431 "DestroyFuncPtr" 7432 }; 7433 7434 for (size_t i = 0; i < 4; ++i) { 7435 FieldDecl *Field = FieldDecl::Create( 7436 *this, RD, SourceLocation(), SourceLocation(), 7437 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7438 /*BitWidth=*/nullptr, 7439 /*Mutable=*/false, ICIS_NoInit); 7440 Field->setAccess(AS_public); 7441 RD->addDecl(Field); 7442 } 7443 7444 RD->completeDefinition(); 7445 7446 BlockDescriptorExtendedType = RD; 7447 return getTagDeclType(BlockDescriptorExtendedType); 7448 } 7449 7450 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7451 const auto *BT = dyn_cast<BuiltinType>(T); 7452 7453 if (!BT) { 7454 if (isa<PipeType>(T)) 7455 return OCLTK_Pipe; 7456 7457 return OCLTK_Default; 7458 } 7459 7460 switch (BT->getKind()) { 7461 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7462 case BuiltinType::Id: \ 7463 return OCLTK_Image; 7464 #include "clang/Basic/OpenCLImageTypes.def" 7465 7466 case BuiltinType::OCLClkEvent: 7467 return OCLTK_ClkEvent; 7468 7469 case BuiltinType::OCLEvent: 7470 return OCLTK_Event; 7471 7472 case BuiltinType::OCLQueue: 7473 return OCLTK_Queue; 7474 7475 case BuiltinType::OCLReserveID: 7476 return OCLTK_ReserveID; 7477 7478 case BuiltinType::OCLSampler: 7479 return OCLTK_Sampler; 7480 7481 default: 7482 return OCLTK_Default; 7483 } 7484 } 7485 7486 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7487 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7488 } 7489 7490 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7491 /// requires copy/dispose. Note that this must match the logic 7492 /// in buildByrefHelpers. 7493 bool ASTContext::BlockRequiresCopying(QualType Ty, 7494 const VarDecl *D) { 7495 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7496 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7497 if (!copyExpr && record->hasTrivialDestructor()) return false; 7498 7499 return true; 7500 } 7501 7502 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7503 // move or destroy. 7504 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7505 return true; 7506 7507 if (!Ty->isObjCRetainableType()) return false; 7508 7509 Qualifiers qs = Ty.getQualifiers(); 7510 7511 // If we have lifetime, that dominates. 7512 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7513 switch (lifetime) { 7514 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7515 7516 // These are just bits as far as the runtime is concerned. 7517 case Qualifiers::OCL_ExplicitNone: 7518 case Qualifiers::OCL_Autoreleasing: 7519 return false; 7520 7521 // These cases should have been taken care of when checking the type's 7522 // non-triviality. 
7523 case Qualifiers::OCL_Weak: 7524 case Qualifiers::OCL_Strong: 7525 llvm_unreachable("impossible"); 7526 } 7527 llvm_unreachable("fell out of lifetime switch!"); 7528 } 7529 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7530 Ty->isObjCObjectPointerType()); 7531 } 7532 7533 bool ASTContext::getByrefLifetime(QualType Ty, 7534 Qualifiers::ObjCLifetime &LifeTime, 7535 bool &HasByrefExtendedLayout) const { 7536 if (!getLangOpts().ObjC || 7537 getLangOpts().getGC() != LangOptions::NonGC) 7538 return false; 7539 7540 HasByrefExtendedLayout = false; 7541 if (Ty->isRecordType()) { 7542 HasByrefExtendedLayout = true; 7543 LifeTime = Qualifiers::OCL_None; 7544 } else if ((LifeTime = Ty.getObjCLifetime())) { 7545 // Honor the ARC qualifiers. 7546 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7547 // The MRR rule. 7548 LifeTime = Qualifiers::OCL_ExplicitNone; 7549 } else { 7550 LifeTime = Qualifiers::OCL_None; 7551 } 7552 return true; 7553 } 7554 7555 CanQualType ASTContext::getNSUIntegerType() const { 7556 assert(Target && "Expected target to be initialized"); 7557 const llvm::Triple &T = Target->getTriple(); 7558 // Windows is LLP64 rather than LP64 7559 if (T.isOSWindows() && T.isArch64Bit()) 7560 return UnsignedLongLongTy; 7561 return UnsignedLongTy; 7562 } 7563 7564 CanQualType ASTContext::getNSIntegerType() const { 7565 assert(Target && "Expected target to be initialized"); 7566 const llvm::Triple &T = Target->getTriple(); 7567 // Windows is LLP64 rather than LP64 7568 if (T.isOSWindows() && T.isArch64Bit()) 7569 return LongLongTy; 7570 return LongTy; 7571 } 7572 7573 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7574 if (!ObjCInstanceTypeDecl) 7575 ObjCInstanceTypeDecl = 7576 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7577 return ObjCInstanceTypeDecl; 7578 } 7579 7580 // This returns true if a type has been typedefed to BOOL: 7581 // typedef <type> BOOL; 7582 static bool isTypeTypedefedAsBOOL(QualType T) { 7583 if (const auto *TT = dyn_cast<TypedefType>(T)) 7584 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7585 return II->isStr("BOOL"); 7586 7587 return false; 7588 } 7589 7590 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7591 /// purpose. 7592 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7593 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7594 return CharUnits::Zero(); 7595 7596 CharUnits sz = getTypeSizeInChars(type); 7597 7598 // Make all integer and enum types at least as large as an int 7599 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7600 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7601 // Treat arrays as pointers, since that's how they're passed in. 7602 else if (type->isArrayType()) 7603 sz = getTypeSizeInChars(VoidPtrTy); 7604 return sz; 7605 } 7606 7607 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7608 return getTargetInfo().getCXXABI().isMicrosoft() && 7609 VD->isStaticDataMember() && 7610 VD->getType()->isIntegralOrEnumerationType() && 7611 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7612 } 7613 7614 ASTContext::InlineVariableDefinitionKind 7615 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7616 if (!VD->isInline()) 7617 return InlineVariableDefinitionKind::None; 7618 7619 // In almost all cases, it's a weak definition. 
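  // For example, 'inline int x = 0;' at namespace scope is a weak definition.
  // The strong case below only arises for constexpr static data members
  // (implicitly inline) that are redeclared at file scope.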
7620 auto *First = VD->getFirstDecl(); 7621 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7622 return InlineVariableDefinitionKind::Weak; 7623 7624 // If there's a file-context declaration in this translation unit, it's a 7625 // non-discardable definition. 7626 for (auto *D : VD->redecls()) 7627 if (D->getLexicalDeclContext()->isFileContext() && 7628 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7629 return InlineVariableDefinitionKind::Strong; 7630 7631 // If we've not seen one yet, we don't know. 7632 return InlineVariableDefinitionKind::WeakUnknown; 7633 } 7634 7635 static std::string charUnitsToString(const CharUnits &CU) { 7636 return llvm::itostr(CU.getQuantity()); 7637 } 7638 7639 /// getObjCEncodingForBlock - Return the encoded type for this block 7640 /// declaration. 7641 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7642 std::string S; 7643 7644 const BlockDecl *Decl = Expr->getBlockDecl(); 7645 QualType BlockTy = 7646 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7647 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7648 // Encode result type. 7649 if (getLangOpts().EncodeExtendedBlockSig) 7650 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7651 true /*Extended*/); 7652 else 7653 getObjCEncodingForType(BlockReturnTy, S); 7654 // Compute size of all parameters. 7655 // Start with computing size of a pointer in number of bytes. 7656 // FIXME: There might(should) be a better way of doing this computation! 7657 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7658 CharUnits ParmOffset = PtrSize; 7659 for (auto *PI : Decl->parameters()) { 7660 QualType PType = PI->getType(); 7661 CharUnits sz = getObjCEncodingTypeSize(PType); 7662 if (sz.isZero()) 7663 continue; 7664 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7665 ParmOffset += sz; 7666 } 7667 // Size of the argument frame 7668 S += charUnitsToString(ParmOffset); 7669 // Block pointer and offset. 7670 S += "@?0"; 7671 7672 // Argument types. 7673 ParmOffset = PtrSize; 7674 for (auto *PVDecl : Decl->parameters()) { 7675 QualType PType = PVDecl->getOriginalType(); 7676 if (const auto *AT = 7677 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7678 // Use array's original type only if it has known number of 7679 // elements. 7680 if (!isa<ConstantArrayType>(AT)) 7681 PType = PVDecl->getType(); 7682 } else if (PType->isFunctionType()) 7683 PType = PVDecl->getType(); 7684 if (getLangOpts().EncodeExtendedBlockSig) 7685 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7686 S, true /*Extended*/); 7687 else 7688 getObjCEncodingForType(PType, S); 7689 S += charUnitsToString(ParmOffset); 7690 ParmOffset += getObjCEncodingTypeSize(PType); 7691 } 7692 7693 return S; 7694 } 7695 7696 std::string 7697 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7698 std::string S; 7699 // Encode result type. 7700 getObjCEncodingForType(Decl->getReturnType(), S); 7701 CharUnits ParmOffset; 7702 // Compute size of all parameters. 7703 for (auto *PI : Decl->parameters()) { 7704 QualType PType = PI->getType(); 7705 CharUnits sz = getObjCEncodingTypeSize(PType); 7706 if (sz.isZero()) 7707 continue; 7708 7709 assert(sz.isPositive() && 7710 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7711 ParmOffset += sz; 7712 } 7713 S += charUnitsToString(ParmOffset); 7714 ParmOffset = CharUnits::Zero(); 7715 7716 // Argument types. 
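  // For example, 'int f(int, char *)' on a typical 64-bit target encodes as
  // "i12i0*4": the return type, the total parameter size, then each parameter
  // type followed by its byte offset.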
7717 for (auto *PVDecl : Decl->parameters()) { 7718 QualType PType = PVDecl->getOriginalType(); 7719 if (const auto *AT = 7720 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7721 // Use array's original type only if it has known number of 7722 // elements. 7723 if (!isa<ConstantArrayType>(AT)) 7724 PType = PVDecl->getType(); 7725 } else if (PType->isFunctionType()) 7726 PType = PVDecl->getType(); 7727 getObjCEncodingForType(PType, S); 7728 S += charUnitsToString(ParmOffset); 7729 ParmOffset += getObjCEncodingTypeSize(PType); 7730 } 7731 7732 return S; 7733 } 7734 7735 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7736 /// method parameter or return type. If Extended, include class names and 7737 /// block object types. 7738 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7739 QualType T, std::string& S, 7740 bool Extended) const { 7741 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7742 getObjCEncodingForTypeQualifier(QT, S); 7743 // Encode parameter type. 7744 ObjCEncOptions Options = ObjCEncOptions() 7745 .setExpandPointedToStructures() 7746 .setExpandStructures() 7747 .setIsOutermostType(); 7748 if (Extended) 7749 Options.setEncodeBlockParameters().setEncodeClassNames(); 7750 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7751 } 7752 7753 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7754 /// declaration. 7755 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7756 bool Extended) const { 7757 // FIXME: This is not very efficient. 7758 // Encode return type. 7759 std::string S; 7760 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7761 Decl->getReturnType(), S, Extended); 7762 // Compute size of all parameters. 7763 // Start with computing size of a pointer in number of bytes. 7764 // FIXME: There might(should) be a better way of doing this computation! 7765 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7766 // The first two arguments (self and _cmd) are pointers; account for 7767 // their size. 7768 CharUnits ParmOffset = 2 * PtrSize; 7769 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7770 E = Decl->sel_param_end(); PI != E; ++PI) { 7771 QualType PType = (*PI)->getType(); 7772 CharUnits sz = getObjCEncodingTypeSize(PType); 7773 if (sz.isZero()) 7774 continue; 7775 7776 assert(sz.isPositive() && 7777 "getObjCEncodingForMethodDecl - Incomplete param type"); 7778 ParmOffset += sz; 7779 } 7780 S += charUnitsToString(ParmOffset); 7781 S += "@0:"; 7782 S += charUnitsToString(PtrSize); 7783 7784 // Argument types. 7785 ParmOffset = 2 * PtrSize; 7786 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7787 E = Decl->sel_param_end(); PI != E; ++PI) { 7788 const ParmVarDecl *PVDecl = *PI; 7789 QualType PType = PVDecl->getOriginalType(); 7790 if (const auto *AT = 7791 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7792 // Use array's original type only if it has known number of 7793 // elements. 
7794 if (!isa<ConstantArrayType>(AT)) 7795 PType = PVDecl->getType(); 7796 } else if (PType->isFunctionType()) 7797 PType = PVDecl->getType(); 7798 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7799 PType, S, Extended); 7800 S += charUnitsToString(ParmOffset); 7801 ParmOffset += getObjCEncodingTypeSize(PType); 7802 } 7803 7804 return S; 7805 } 7806 7807 ObjCPropertyImplDecl * 7808 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7809 const ObjCPropertyDecl *PD, 7810 const Decl *Container) const { 7811 if (!Container) 7812 return nullptr; 7813 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7814 for (auto *PID : CID->property_impls()) 7815 if (PID->getPropertyDecl() == PD) 7816 return PID; 7817 } else { 7818 const auto *OID = cast<ObjCImplementationDecl>(Container); 7819 for (auto *PID : OID->property_impls()) 7820 if (PID->getPropertyDecl() == PD) 7821 return PID; 7822 } 7823 return nullptr; 7824 } 7825 7826 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7827 /// property declaration. If non-NULL, Container must be either an 7828 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7829 /// NULL when getting encodings for protocol properties. 7830 /// Property attributes are stored as a comma-delimited C string. The simple 7831 /// attributes readonly and bycopy are encoded as single characters. The 7832 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7833 /// encoded as single characters, followed by an identifier. Property types 7834 /// are also encoded as a parametrized attribute. The characters used to encode 7835 /// these attributes are defined by the following enumeration: 7836 /// @code 7837 /// enum PropertyAttributes { 7838 /// kPropertyReadOnly = 'R', // property is read-only. 7839 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7840 /// kPropertyByref = '&', // property is a reference to the value last assigned 7841 /// kPropertyDynamic = 'D', // property is dynamic 7842 /// kPropertyGetter = 'G', // followed by getter selector name 7843 /// kPropertySetter = 'S', // followed by setter selector name 7844 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7845 /// kPropertyType = 'T' // followed by old-style type encoding. 7846 /// kPropertyWeak = 'W' // 'weak' property 7847 /// kPropertyStrong = 'P' // property GC'able 7848 /// kPropertyNonAtomic = 'N' // property non-atomic 7849 /// }; 7850 /// @endcode 7851 std::string 7852 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7853 const Decl *Container) const { 7854 // Collect information from the property implementation decl(s). 7855 bool Dynamic = false; 7856 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7857 7858 if (ObjCPropertyImplDecl *PropertyImpDecl = 7859 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7860 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7861 Dynamic = true; 7862 else 7863 SynthesizePID = PropertyImpDecl; 7864 } 7865 7866 // FIXME: This is not very efficient. 7867 std::string S = "T"; 7868 7869 // Encode result type. 7870 // GCC has some special rules regarding encoding of properties which 7871 // closely resembles encoding of ivars. 
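  // The final result is a comma-separated attribute string. For example, a
  // synthesized '@property (nonatomic, copy) NSString *name;' backed by the
  // ivar '_name' would typically encode as: T@"NSString",C,N,V_name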
7872 getObjCEncodingForPropertyType(PD->getType(), S); 7873 7874 if (PD->isReadOnly()) { 7875 S += ",R"; 7876 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7877 S += ",C"; 7878 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7879 S += ",&"; 7880 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7881 S += ",W"; 7882 } else { 7883 switch (PD->getSetterKind()) { 7884 case ObjCPropertyDecl::Assign: break; 7885 case ObjCPropertyDecl::Copy: S += ",C"; break; 7886 case ObjCPropertyDecl::Retain: S += ",&"; break; 7887 case ObjCPropertyDecl::Weak: S += ",W"; break; 7888 } 7889 } 7890 7891 // It really isn't clear at all what this means, since properties 7892 // are "dynamic by default". 7893 if (Dynamic) 7894 S += ",D"; 7895 7896 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7897 S += ",N"; 7898 7899 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7900 S += ",G"; 7901 S += PD->getGetterName().getAsString(); 7902 } 7903 7904 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7905 S += ",S"; 7906 S += PD->getSetterName().getAsString(); 7907 } 7908 7909 if (SynthesizePID) { 7910 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7911 S += ",V"; 7912 S += OID->getNameAsString(); 7913 } 7914 7915 // FIXME: OBJCGC: weak & strong 7916 return S; 7917 } 7918 7919 /// getLegacyIntegralTypeEncoding - 7920 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7921 /// 'l' or 'L' , but not always. For typedefs, we need to use 7922 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7923 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7924 if (PointeeTy->getAs<TypedefType>()) { 7925 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7926 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7927 PointeeTy = UnsignedIntTy; 7928 else 7929 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7930 PointeeTy = IntTy; 7931 } 7932 } 7933 } 7934 7935 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7936 const FieldDecl *Field, 7937 QualType *NotEncodedT) const { 7938 // We follow the behavior of gcc, expanding structures which are 7939 // directly pointed to, and expanding embedded structures. Note that 7940 // these rules are sufficient to prevent recursive encoding of the 7941 // same type. 7942 getObjCEncodingForTypeImpl(T, S, 7943 ObjCEncOptions() 7944 .setExpandPointedToStructures() 7945 .setExpandStructures() 7946 .setIsOutermostType(), 7947 Field, NotEncodedT); 7948 } 7949 7950 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7951 std::string& S) const { 7952 // Encode result type. 7953 // GCC has some special rules regarding encoding of properties which 7954 // closely resembles encoding of ivars. 
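  // Setting EncodingProperty below makes Objective-C object-pointer types
  // carry the quoted class name (and any protocol list), e.g. @"NSString"
  // rather than just '@'.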
7955 getObjCEncodingForTypeImpl(T, S, 7956 ObjCEncOptions() 7957 .setExpandPointedToStructures() 7958 .setExpandStructures() 7959 .setIsOutermostType() 7960 .setEncodingProperty(), 7961 /*Field=*/nullptr); 7962 } 7963 7964 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7965 const BuiltinType *BT) { 7966 BuiltinType::Kind kind = BT->getKind(); 7967 switch (kind) { 7968 case BuiltinType::Void: return 'v'; 7969 case BuiltinType::Bool: return 'B'; 7970 case BuiltinType::Char8: 7971 case BuiltinType::Char_U: 7972 case BuiltinType::UChar: return 'C'; 7973 case BuiltinType::Char16: 7974 case BuiltinType::UShort: return 'S'; 7975 case BuiltinType::Char32: 7976 case BuiltinType::UInt: return 'I'; 7977 case BuiltinType::ULong: 7978 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7979 case BuiltinType::UInt128: return 'T'; 7980 case BuiltinType::ULongLong: return 'Q'; 7981 case BuiltinType::Char_S: 7982 case BuiltinType::SChar: return 'c'; 7983 case BuiltinType::Short: return 's'; 7984 case BuiltinType::WChar_S: 7985 case BuiltinType::WChar_U: 7986 case BuiltinType::Int: return 'i'; 7987 case BuiltinType::Long: 7988 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7989 case BuiltinType::LongLong: return 'q'; 7990 case BuiltinType::Int128: return 't'; 7991 case BuiltinType::Float: return 'f'; 7992 case BuiltinType::Double: return 'd'; 7993 case BuiltinType::LongDouble: return 'D'; 7994 case BuiltinType::NullPtr: return '*'; // like char* 7995 7996 case BuiltinType::BFloat16: 7997 case BuiltinType::Float16: 7998 case BuiltinType::Float128: 7999 case BuiltinType::Ibm128: 8000 case BuiltinType::Half: 8001 case BuiltinType::ShortAccum: 8002 case BuiltinType::Accum: 8003 case BuiltinType::LongAccum: 8004 case BuiltinType::UShortAccum: 8005 case BuiltinType::UAccum: 8006 case BuiltinType::ULongAccum: 8007 case BuiltinType::ShortFract: 8008 case BuiltinType::Fract: 8009 case BuiltinType::LongFract: 8010 case BuiltinType::UShortFract: 8011 case BuiltinType::UFract: 8012 case BuiltinType::ULongFract: 8013 case BuiltinType::SatShortAccum: 8014 case BuiltinType::SatAccum: 8015 case BuiltinType::SatLongAccum: 8016 case BuiltinType::SatUShortAccum: 8017 case BuiltinType::SatUAccum: 8018 case BuiltinType::SatULongAccum: 8019 case BuiltinType::SatShortFract: 8020 case BuiltinType::SatFract: 8021 case BuiltinType::SatLongFract: 8022 case BuiltinType::SatUShortFract: 8023 case BuiltinType::SatUFract: 8024 case BuiltinType::SatULongFract: 8025 // FIXME: potentially need @encodes for these! 8026 return ' '; 8027 8028 #define SVE_TYPE(Name, Id, SingletonId) \ 8029 case BuiltinType::Id: 8030 #include "clang/Basic/AArch64SVEACLETypes.def" 8031 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8032 #include "clang/Basic/RISCVVTypes.def" 8033 { 8034 DiagnosticsEngine &Diags = C->getDiagnostics(); 8035 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 8036 "cannot yet @encode type %0"); 8037 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 8038 return ' '; 8039 } 8040 8041 case BuiltinType::ObjCId: 8042 case BuiltinType::ObjCClass: 8043 case BuiltinType::ObjCSel: 8044 llvm_unreachable("@encoding ObjC primitive type"); 8045 8046 // OpenCL and placeholder types don't need @encodings. 
8047 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 8048 case BuiltinType::Id: 8049 #include "clang/Basic/OpenCLImageTypes.def" 8050 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 8051 case BuiltinType::Id: 8052 #include "clang/Basic/OpenCLExtensionTypes.def" 8053 case BuiltinType::OCLEvent: 8054 case BuiltinType::OCLClkEvent: 8055 case BuiltinType::OCLQueue: 8056 case BuiltinType::OCLReserveID: 8057 case BuiltinType::OCLSampler: 8058 case BuiltinType::Dependent: 8059 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 8060 case BuiltinType::Id: 8061 #include "clang/Basic/PPCTypes.def" 8062 #define BUILTIN_TYPE(KIND, ID) 8063 #define PLACEHOLDER_TYPE(KIND, ID) \ 8064 case BuiltinType::KIND: 8065 #include "clang/AST/BuiltinTypes.def" 8066 llvm_unreachable("invalid builtin type for @encode"); 8067 } 8068 llvm_unreachable("invalid BuiltinType::Kind value"); 8069 } 8070 8071 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 8072 EnumDecl *Enum = ET->getDecl(); 8073 8074 // The encoding of an non-fixed enum type is always 'i', regardless of size. 8075 if (!Enum->isFixed()) 8076 return 'i'; 8077 8078 // The encoding of a fixed enum type matches its fixed underlying type. 8079 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 8080 return getObjCEncodingForPrimitiveType(C, BT); 8081 } 8082 8083 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 8084 QualType T, const FieldDecl *FD) { 8085 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 8086 S += 'b'; 8087 // The NeXT runtime encodes bit fields as b followed by the number of bits. 8088 // The GNU runtime requires more information; bitfields are encoded as b, 8089 // then the offset (in bits) of the first element, then the type of the 8090 // bitfield, then the size in bits. For example, in this structure: 8091 // 8092 // struct 8093 // { 8094 // int integer; 8095 // int flags:2; 8096 // }; 8097 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 8098 // runtime, but b32i2 for the GNU runtime. The reason for this extra 8099 // information is not especially sensible, but we're stuck with it for 8100 // compatibility with GCC, although providing it breaks anything that 8101 // actually uses runtime introspection and wants to work on both runtimes... 8102 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 8103 uint64_t Offset; 8104 8105 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8106 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8107 IVD); 8108 } else { 8109 const RecordDecl *RD = FD->getParent(); 8110 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8111 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8112 } 8113 8114 S += llvm::utostr(Offset); 8115 8116 if (const auto *ET = T->getAs<EnumType>()) 8117 S += ObjCEncodingForEnumType(Ctx, ET); 8118 else { 8119 const auto *BT = T->castAs<BuiltinType>(); 8120 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8121 } 8122 } 8123 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8124 } 8125 8126 // Helper function for determining whether the encoded type string would include 8127 // a template specialization type. 
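// Used when deciding how to encode a pointer to a C++ class: if the expansion
// would contain a template specialization name and the language options do
// not allow encoding class template specializations, the pointer is encoded
// as "^v" instead (see the pointer case in getObjCEncodingForTypeImpl).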
8128 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8129 bool VisitBasesAndFields) { 8130 T = T->getBaseElementTypeUnsafe(); 8131 8132 if (auto *PT = T->getAs<PointerType>()) 8133 return hasTemplateSpecializationInEncodedString( 8134 PT->getPointeeType().getTypePtr(), false); 8135 8136 auto *CXXRD = T->getAsCXXRecordDecl(); 8137 8138 if (!CXXRD) 8139 return false; 8140 8141 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8142 return true; 8143 8144 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8145 return false; 8146 8147 for (auto B : CXXRD->bases()) 8148 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8149 true)) 8150 return true; 8151 8152 for (auto *FD : CXXRD->fields()) 8153 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8154 true)) 8155 return true; 8156 8157 return false; 8158 } 8159 8160 // FIXME: Use SmallString for accumulating string. 8161 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8162 const ObjCEncOptions Options, 8163 const FieldDecl *FD, 8164 QualType *NotEncodedT) const { 8165 CanQualType CT = getCanonicalType(T); 8166 switch (CT->getTypeClass()) { 8167 case Type::Builtin: 8168 case Type::Enum: 8169 if (FD && FD->isBitField()) 8170 return EncodeBitField(this, S, T, FD); 8171 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8172 S += getObjCEncodingForPrimitiveType(this, BT); 8173 else 8174 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8175 return; 8176 8177 case Type::Complex: 8178 S += 'j'; 8179 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8180 ObjCEncOptions(), 8181 /*Field=*/nullptr); 8182 return; 8183 8184 case Type::Atomic: 8185 S += 'A'; 8186 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8187 ObjCEncOptions(), 8188 /*Field=*/nullptr); 8189 return; 8190 8191 // encoding for pointer or reference types. 8192 case Type::Pointer: 8193 case Type::LValueReference: 8194 case Type::RValueReference: { 8195 QualType PointeeTy; 8196 if (isa<PointerType>(CT)) { 8197 const auto *PT = T->castAs<PointerType>(); 8198 if (PT->isObjCSelType()) { 8199 S += ':'; 8200 return; 8201 } 8202 PointeeTy = PT->getPointeeType(); 8203 } else { 8204 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8205 } 8206 8207 bool isReadOnly = false; 8208 // For historical/compatibility reasons, the read-only qualifier of the 8209 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8210 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8211 // Also, do not emit the 'r' for anything but the outermost type! 8212 if (T->getAs<TypedefType>()) { 8213 if (Options.IsOutermostType() && T.isConstQualified()) { 8214 isReadOnly = true; 8215 S += 'r'; 8216 } 8217 } else if (Options.IsOutermostType()) { 8218 QualType P = PointeeTy; 8219 while (auto PT = P->getAs<PointerType>()) 8220 P = PT->getPointeeType(); 8221 if (P.isConstQualified()) { 8222 isReadOnly = true; 8223 S += 'r'; 8224 } 8225 } 8226 if (isReadOnly) { 8227 // Another legacy compatibility encoding. Some ObjC qualifier and type 8228 // combinations need to be rearranged. 8229 // Rewrite "in const" from "nr" to "rn" 8230 if (StringRef(S).endswith("nr")) 8231 S.replace(S.end()-2, S.end(), "rn"); 8232 } 8233 8234 if (PointeeTy->isCharType()) { 8235 // char pointer types should be encoded as '*' unless it is a 8236 // type that has been typedef'd to 'BOOL'. 
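      // (A pointer to a type typedef'd as 'BOOL' falls through below and is
      // encoded as '^' plus the underlying type, typically "^c".)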
8237 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8238 S += '*'; 8239 return; 8240 } 8241 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8242 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8243 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8244 S += '#'; 8245 return; 8246 } 8247 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8248 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8249 S += '@'; 8250 return; 8251 } 8252 // If the encoded string for the class includes template names, just emit 8253 // "^v" for pointers to the class. 8254 if (getLangOpts().CPlusPlus && 8255 (!getLangOpts().EncodeCXXClassTemplateSpec && 8256 hasTemplateSpecializationInEncodedString( 8257 RTy, Options.ExpandPointedToStructures()))) { 8258 S += "^v"; 8259 return; 8260 } 8261 // fall through... 8262 } 8263 S += '^'; 8264 getLegacyIntegralTypeEncoding(PointeeTy); 8265 8266 ObjCEncOptions NewOptions; 8267 if (Options.ExpandPointedToStructures()) 8268 NewOptions.setExpandStructures(); 8269 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8270 /*Field=*/nullptr, NotEncodedT); 8271 return; 8272 } 8273 8274 case Type::ConstantArray: 8275 case Type::IncompleteArray: 8276 case Type::VariableArray: { 8277 const auto *AT = cast<ArrayType>(CT); 8278 8279 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8280 // Incomplete arrays are encoded as a pointer to the array element. 8281 S += '^'; 8282 8283 getObjCEncodingForTypeImpl( 8284 AT->getElementType(), S, 8285 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8286 } else { 8287 S += '['; 8288 8289 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8290 S += llvm::utostr(CAT->getSize().getZExtValue()); 8291 else { 8292 //Variable length arrays are encoded as a regular array with 0 elements. 8293 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8294 "Unknown array type!"); 8295 S += '0'; 8296 } 8297 8298 getObjCEncodingForTypeImpl( 8299 AT->getElementType(), S, 8300 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8301 NotEncodedT); 8302 S += ']'; 8303 } 8304 return; 8305 } 8306 8307 case Type::FunctionNoProto: 8308 case Type::FunctionProto: 8309 S += '?'; 8310 return; 8311 8312 case Type::Record: { 8313 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8314 S += RDecl->isUnion() ? '(' : '{'; 8315 // Anonymous structures print as '?' 8316 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8317 S += II->getName(); 8318 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8319 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8320 llvm::raw_string_ostream OS(S); 8321 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8322 getPrintingPolicy()); 8323 } 8324 } else { 8325 S += '?'; 8326 } 8327 if (Options.ExpandStructures()) { 8328 S += '='; 8329 if (!RDecl->isUnion()) { 8330 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8331 } else { 8332 for (const auto *Field : RDecl->fields()) { 8333 if (FD) { 8334 S += '"'; 8335 S += Field->getNameAsString(); 8336 S += '"'; 8337 } 8338 8339 // Special case bit-fields. 
8340 if (Field->isBitField()) { 8341 getObjCEncodingForTypeImpl(Field->getType(), S, 8342 ObjCEncOptions().setExpandStructures(), 8343 Field); 8344 } else { 8345 QualType qt = Field->getType(); 8346 getLegacyIntegralTypeEncoding(qt); 8347 getObjCEncodingForTypeImpl( 8348 qt, S, 8349 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8350 NotEncodedT); 8351 } 8352 } 8353 } 8354 } 8355 S += RDecl->isUnion() ? ')' : '}'; 8356 return; 8357 } 8358 8359 case Type::BlockPointer: { 8360 const auto *BT = T->castAs<BlockPointerType>(); 8361 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8362 if (Options.EncodeBlockParameters()) { 8363 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8364 8365 S += '<'; 8366 // Block return type 8367 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8368 Options.forComponentType(), FD, NotEncodedT); 8369 // Block self 8370 S += "@?"; 8371 // Block parameters 8372 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8373 for (const auto &I : FPT->param_types()) 8374 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8375 NotEncodedT); 8376 } 8377 S += '>'; 8378 } 8379 return; 8380 } 8381 8382 case Type::ObjCObject: { 8383 // hack to match legacy encoding of *id and *Class 8384 QualType Ty = getObjCObjectPointerType(CT); 8385 if (Ty->isObjCIdType()) { 8386 S += "{objc_object=}"; 8387 return; 8388 } 8389 else if (Ty->isObjCClassType()) { 8390 S += "{objc_class=}"; 8391 return; 8392 } 8393 // TODO: Double check to make sure this intentionally falls through. 8394 [[fallthrough]]; 8395 } 8396 8397 case Type::ObjCInterface: { 8398 // Ignore protocol qualifiers when mangling at this level. 8399 // @encode(class_name) 8400 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8401 S += '{'; 8402 S += OI->getObjCRuntimeNameAsString(); 8403 if (Options.ExpandStructures()) { 8404 S += '='; 8405 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8406 DeepCollectObjCIvars(OI, true, Ivars); 8407 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8408 const FieldDecl *Field = Ivars[i]; 8409 if (Field->isBitField()) 8410 getObjCEncodingForTypeImpl(Field->getType(), S, 8411 ObjCEncOptions().setExpandStructures(), 8412 Field); 8413 else 8414 getObjCEncodingForTypeImpl(Field->getType(), S, 8415 ObjCEncOptions().setExpandStructures(), FD, 8416 NotEncodedT); 8417 } 8418 } 8419 S += '}'; 8420 return; 8421 } 8422 8423 case Type::ObjCObjectPointer: { 8424 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8425 if (OPT->isObjCIdType()) { 8426 S += '@'; 8427 return; 8428 } 8429 8430 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8431 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8432 // Since this is a binary compatibility issue, need to consult with 8433 // runtime folks. Fortunately, this is a *very* obscure construct. 8434 S += '#'; 8435 return; 8436 } 8437 8438 if (OPT->isObjCQualifiedIdType()) { 8439 getObjCEncodingForTypeImpl( 8440 getObjCIdType(), S, 8441 Options.keepingOnly(ObjCEncOptions() 8442 .setExpandPointedToStructures() 8443 .setExpandStructures()), 8444 FD); 8445 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8446 // Note that we do extended encoding of protocol qualifier list 8447 // Only when doing ivar or property encoding. 
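      // For example, 'id<NSCopying>' in an ivar or property encodes as
      // @"<NSCopying>".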
8448 S += '"'; 8449 for (const auto *I : OPT->quals()) { 8450 S += '<'; 8451 S += I->getObjCRuntimeNameAsString(); 8452 S += '>'; 8453 } 8454 S += '"'; 8455 } 8456 return; 8457 } 8458 8459 S += '@'; 8460 if (OPT->getInterfaceDecl() && 8461 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8462 S += '"'; 8463 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8464 for (const auto *I : OPT->quals()) { 8465 S += '<'; 8466 S += I->getObjCRuntimeNameAsString(); 8467 S += '>'; 8468 } 8469 S += '"'; 8470 } 8471 return; 8472 } 8473 8474 // gcc just blithely ignores member pointers. 8475 // FIXME: we should do better than that. 'M' is available. 8476 case Type::MemberPointer: 8477 // This matches gcc's encoding, even though technically it is insufficient. 8478 //FIXME. We should do a better job than gcc. 8479 case Type::Vector: 8480 case Type::ExtVector: 8481 // Until we have a coherent encoding of these three types, issue warning. 8482 if (NotEncodedT) 8483 *NotEncodedT = T; 8484 return; 8485 8486 case Type::ConstantMatrix: 8487 if (NotEncodedT) 8488 *NotEncodedT = T; 8489 return; 8490 8491 case Type::BitInt: 8492 if (NotEncodedT) 8493 *NotEncodedT = T; 8494 return; 8495 8496 // We could see an undeduced auto type here during error recovery. 8497 // Just ignore it. 8498 case Type::Auto: 8499 case Type::DeducedTemplateSpecialization: 8500 return; 8501 8502 case Type::Pipe: 8503 #define ABSTRACT_TYPE(KIND, BASE) 8504 #define TYPE(KIND, BASE) 8505 #define DEPENDENT_TYPE(KIND, BASE) \ 8506 case Type::KIND: 8507 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8508 case Type::KIND: 8509 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8510 case Type::KIND: 8511 #include "clang/AST/TypeNodes.inc" 8512 llvm_unreachable("@encode for dependent type!"); 8513 } 8514 llvm_unreachable("bad type kind!"); 8515 } 8516 8517 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8518 std::string &S, 8519 const FieldDecl *FD, 8520 bool includeVBases, 8521 QualType *NotEncodedT) const { 8522 assert(RDecl && "Expected non-null RecordDecl"); 8523 assert(!RDecl->isUnion() && "Should not be called for unions"); 8524 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8525 return; 8526 8527 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8528 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8529 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8530 8531 if (CXXRec) { 8532 for (const auto &BI : CXXRec->bases()) { 8533 if (!BI.isVirtual()) { 8534 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8535 if (base->isEmpty()) 8536 continue; 8537 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8538 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8539 std::make_pair(offs, base)); 8540 } 8541 } 8542 } 8543 8544 unsigned i = 0; 8545 for (FieldDecl *Field : RDecl->fields()) { 8546 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8547 continue; 8548 uint64_t offs = layout.getFieldOffset(i); 8549 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8550 std::make_pair(offs, Field)); 8551 ++i; 8552 } 8553 8554 if (CXXRec && includeVBases) { 8555 for (const auto &BI : CXXRec->vbases()) { 8556 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8557 if (base->isEmpty()) 8558 continue; 8559 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8560 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8561 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8562 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
8563                                   std::make_pair(offs, base));
8564     }
8565   }
8566 
8567   CharUnits size;
8568   if (CXXRec) {
8569     size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
8570   } else {
8571     size = layout.getSize();
8572   }
8573 
8574 #ifndef NDEBUG
8575   uint64_t CurOffs = 0;
8576 #endif
8577   std::multimap<uint64_t, NamedDecl *>::iterator
8578     CurLayObj = FieldOrBaseOffsets.begin();
8579 
8580   if (CXXRec && CXXRec->isDynamicClass() &&
8581       (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
8582     if (FD) {
8583       S += "\"_vptr$";
8584       std::string recname = CXXRec->getNameAsString();
8585       if (recname.empty()) recname = "?";
8586       S += recname;
8587       S += '"';
8588     }
8589     S += "^^?";
8590 #ifndef NDEBUG
8591     CurOffs += getTypeSize(VoidPtrTy);
8592 #endif
8593   }
8594 
8595   if (!RDecl->hasFlexibleArrayMember()) {
8596     // Mark the end of the structure.
8597     uint64_t offs = toBits(size);
8598     FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
8599                               std::make_pair(offs, nullptr));
8600   }
8601 
8602   for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
8603 #ifndef NDEBUG
8604     assert(CurOffs <= CurLayObj->first);
8605     if (CurOffs < CurLayObj->first) {
8606       uint64_t padding = CurLayObj->first - CurOffs;
8607       // FIXME: There doesn't seem to be a way to indicate in the encoding that
8608       // packing/alignment of members is different than normal, in which case
8609       // the encoding will be out-of-sync with the real layout.
8610       // If the runtime switches to just consider the size of types without
8611       // taking into account alignment, we could make padding explicit in the
8612       // encoding (e.g. using arrays of chars). The encoding strings would be
8613       // longer then, though.
8614       CurOffs += padding;
8615     }
8616 #endif
8617 
8618     NamedDecl *dcl = CurLayObj->second;
8619     if (!dcl)
8620       break; // Reached the end of the structure.
8621 
8622     if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
8623       // We expand the bases without their virtual bases since those are going
8624       // in the initial structure. Note that this differs from gcc, which
8625       // expands virtual bases each time one is encountered in the hierarchy,
8626       // making the encoding type bigger than it really is.
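      // The virtual bases themselves were already added to FieldOrBaseOffsets
      // above (when includeVBases is true), so each one is emitted exactly
      // once, at its final offset.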
8627 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8628 NotEncodedT); 8629 assert(!base->isEmpty()); 8630 #ifndef NDEBUG 8631 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8632 #endif 8633 } else { 8634 const auto *field = cast<FieldDecl>(dcl); 8635 if (FD) { 8636 S += '"'; 8637 S += field->getNameAsString(); 8638 S += '"'; 8639 } 8640 8641 if (field->isBitField()) { 8642 EncodeBitField(this, S, field->getType(), field); 8643 #ifndef NDEBUG 8644 CurOffs += field->getBitWidthValue(*this); 8645 #endif 8646 } else { 8647 QualType qt = field->getType(); 8648 getLegacyIntegralTypeEncoding(qt); 8649 getObjCEncodingForTypeImpl( 8650 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8651 FD, NotEncodedT); 8652 #ifndef NDEBUG 8653 CurOffs += getTypeSize(field->getType()); 8654 #endif 8655 } 8656 } 8657 } 8658 } 8659 8660 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8661 std::string& S) const { 8662 if (QT & Decl::OBJC_TQ_In) 8663 S += 'n'; 8664 if (QT & Decl::OBJC_TQ_Inout) 8665 S += 'N'; 8666 if (QT & Decl::OBJC_TQ_Out) 8667 S += 'o'; 8668 if (QT & Decl::OBJC_TQ_Bycopy) 8669 S += 'O'; 8670 if (QT & Decl::OBJC_TQ_Byref) 8671 S += 'R'; 8672 if (QT & Decl::OBJC_TQ_Oneway) 8673 S += 'V'; 8674 } 8675 8676 TypedefDecl *ASTContext::getObjCIdDecl() const { 8677 if (!ObjCIdDecl) { 8678 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8679 T = getObjCObjectPointerType(T); 8680 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8681 } 8682 return ObjCIdDecl; 8683 } 8684 8685 TypedefDecl *ASTContext::getObjCSelDecl() const { 8686 if (!ObjCSelDecl) { 8687 QualType T = getPointerType(ObjCBuiltinSelTy); 8688 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8689 } 8690 return ObjCSelDecl; 8691 } 8692 8693 TypedefDecl *ASTContext::getObjCClassDecl() const { 8694 if (!ObjCClassDecl) { 8695 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8696 T = getObjCObjectPointerType(T); 8697 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8698 } 8699 return ObjCClassDecl; 8700 } 8701 8702 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8703 if (!ObjCProtocolClassDecl) { 8704 ObjCProtocolClassDecl 8705 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8706 SourceLocation(), 8707 &Idents.get("Protocol"), 8708 /*typeParamList=*/nullptr, 8709 /*PrevDecl=*/nullptr, 8710 SourceLocation(), true); 8711 } 8712 8713 return ObjCProtocolClassDecl; 8714 } 8715 8716 //===----------------------------------------------------------------------===// 8717 // __builtin_va_list Construction Functions 8718 //===----------------------------------------------------------------------===// 8719 8720 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8721 StringRef Name) { 8722 // typedef char* __builtin[_ms]_va_list; 8723 QualType T = Context->getPointerType(Context->CharTy); 8724 return Context->buildImplicitTypedef(T, Name); 8725 } 8726 8727 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8728 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8729 } 8730 8731 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8732 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8733 } 8734 8735 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8736 // typedef void* __builtin_va_list; 8737 QualType T = Context->getPointerType(Context->VoidTy); 8738 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8739 } 8740 8741 static TypedefDecl * 8742 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8743 // struct __va_list 8744 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8745 if (Context->getLangOpts().CPlusPlus) { 8746 // namespace std { struct __va_list { 8747 auto *NS = NamespaceDecl::Create( 8748 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8749 /*Inline=*/false, SourceLocation(), SourceLocation(), 8750 &Context->Idents.get("std"), 8751 /*PrevDecl=*/nullptr, /*Nested=*/false); 8752 NS->setImplicit(); 8753 VaListTagDecl->setDeclContext(NS); 8754 } 8755 8756 VaListTagDecl->startDefinition(); 8757 8758 const size_t NumFields = 5; 8759 QualType FieldTypes[NumFields]; 8760 const char *FieldNames[NumFields]; 8761 8762 // void *__stack; 8763 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8764 FieldNames[0] = "__stack"; 8765 8766 // void *__gr_top; 8767 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8768 FieldNames[1] = "__gr_top"; 8769 8770 // void *__vr_top; 8771 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8772 FieldNames[2] = "__vr_top"; 8773 8774 // int __gr_offs; 8775 FieldTypes[3] = Context->IntTy; 8776 FieldNames[3] = "__gr_offs"; 8777 8778 // int __vr_offs; 8779 FieldTypes[4] = Context->IntTy; 8780 FieldNames[4] = "__vr_offs"; 8781 8782 // Create fields 8783 for (unsigned i = 0; i < NumFields; ++i) { 8784 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8785 VaListTagDecl, 8786 SourceLocation(), 8787 SourceLocation(), 8788 &Context->Idents.get(FieldNames[i]), 8789 FieldTypes[i], /*TInfo=*/nullptr, 8790 /*BitWidth=*/nullptr, 8791 /*Mutable=*/false, 8792 ICIS_NoInit); 8793 Field->setAccess(AS_public); 8794 VaListTagDecl->addDecl(Field); 8795 } 8796 VaListTagDecl->completeDefinition(); 8797 Context->VaListTagDecl = VaListTagDecl; 8798 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8799 8800 // } __builtin_va_list; 8801 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8802 } 8803 8804 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8805 // typedef struct __va_list_tag { 8806 RecordDecl *VaListTagDecl; 8807 8808 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8809 VaListTagDecl->startDefinition(); 8810 8811 const size_t NumFields = 5; 8812 QualType FieldTypes[NumFields]; 8813 const char *FieldNames[NumFields]; 8814 8815 // unsigned char gpr; 8816 FieldTypes[0] = Context->UnsignedCharTy; 8817 FieldNames[0] = "gpr"; 8818 8819 // unsigned char fpr; 8820 FieldTypes[1] = Context->UnsignedCharTy; 8821 FieldNames[1] = "fpr"; 8822 8823 // unsigned short reserved; 8824 FieldTypes[2] = Context->UnsignedShortTy; 8825 FieldNames[2] = "reserved"; 8826 8827 // void* overflow_arg_area; 8828 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8829 FieldNames[3] = "overflow_arg_area"; 8830 8831 // void* reg_save_area; 8832 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8833 FieldNames[4] = "reg_save_area"; 8834 8835 // Create fields 8836 for (unsigned i = 0; i < NumFields; ++i) { 8837 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8838 SourceLocation(), 8839 SourceLocation(), 8840 &Context->Idents.get(FieldNames[i]), 8841 FieldTypes[i], /*TInfo=*/nullptr, 8842 /*BitWidth=*/nullptr, 8843 /*Mutable=*/false, 8844 ICIS_NoInit); 8845 Field->setAccess(AS_public); 8846 VaListTagDecl->addDecl(Field); 8847 } 8848 VaListTagDecl->completeDefinition(); 8849 
Context->VaListTagDecl = VaListTagDecl; 8850 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8851 8852 // } __va_list_tag; 8853 TypedefDecl *VaListTagTypedefDecl = 8854 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8855 8856 QualType VaListTagTypedefType = 8857 Context->getTypedefType(VaListTagTypedefDecl); 8858 8859 // typedef __va_list_tag __builtin_va_list[1]; 8860 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8861 QualType VaListTagArrayType 8862 = Context->getConstantArrayType(VaListTagTypedefType, 8863 Size, nullptr, ArrayType::Normal, 0); 8864 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8865 } 8866 8867 static TypedefDecl * 8868 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8869 // struct __va_list_tag { 8870 RecordDecl *VaListTagDecl; 8871 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8872 VaListTagDecl->startDefinition(); 8873 8874 const size_t NumFields = 4; 8875 QualType FieldTypes[NumFields]; 8876 const char *FieldNames[NumFields]; 8877 8878 // unsigned gp_offset; 8879 FieldTypes[0] = Context->UnsignedIntTy; 8880 FieldNames[0] = "gp_offset"; 8881 8882 // unsigned fp_offset; 8883 FieldTypes[1] = Context->UnsignedIntTy; 8884 FieldNames[1] = "fp_offset"; 8885 8886 // void* overflow_arg_area; 8887 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8888 FieldNames[2] = "overflow_arg_area"; 8889 8890 // void* reg_save_area; 8891 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8892 FieldNames[3] = "reg_save_area"; 8893 8894 // Create fields 8895 for (unsigned i = 0; i < NumFields; ++i) { 8896 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8897 VaListTagDecl, 8898 SourceLocation(), 8899 SourceLocation(), 8900 &Context->Idents.get(FieldNames[i]), 8901 FieldTypes[i], /*TInfo=*/nullptr, 8902 /*BitWidth=*/nullptr, 8903 /*Mutable=*/false, 8904 ICIS_NoInit); 8905 Field->setAccess(AS_public); 8906 VaListTagDecl->addDecl(Field); 8907 } 8908 VaListTagDecl->completeDefinition(); 8909 Context->VaListTagDecl = VaListTagDecl; 8910 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8911 8912 // }; 8913 8914 // typedef struct __va_list_tag __builtin_va_list[1]; 8915 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8916 QualType VaListTagArrayType = Context->getConstantArrayType( 8917 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8918 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8919 } 8920 8921 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8922 // typedef int __builtin_va_list[4]; 8923 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8924 QualType IntArrayType = Context->getConstantArrayType( 8925 Context->IntTy, Size, nullptr, ArrayType::Normal, 0); 8926 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8927 } 8928 8929 static TypedefDecl * 8930 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8931 // struct __va_list 8932 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8933 if (Context->getLangOpts().CPlusPlus) { 8934 // namespace std { struct __va_list { 8935 NamespaceDecl *NS; 8936 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8937 Context->getTranslationUnitDecl(), 8938 /*Inline=*/false, SourceLocation(), 8939 SourceLocation(), &Context->Idents.get("std"), 8940 /*PrevDecl=*/nullptr, /*Nested=*/false); 8941 NS->setImplicit(); 8942 
VaListDecl->setDeclContext(NS); 8943 } 8944 8945 VaListDecl->startDefinition(); 8946 8947 // void * __ap; 8948 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8949 VaListDecl, 8950 SourceLocation(), 8951 SourceLocation(), 8952 &Context->Idents.get("__ap"), 8953 Context->getPointerType(Context->VoidTy), 8954 /*TInfo=*/nullptr, 8955 /*BitWidth=*/nullptr, 8956 /*Mutable=*/false, 8957 ICIS_NoInit); 8958 Field->setAccess(AS_public); 8959 VaListDecl->addDecl(Field); 8960 8961 // }; 8962 VaListDecl->completeDefinition(); 8963 Context->VaListTagDecl = VaListDecl; 8964 8965 // typedef struct __va_list __builtin_va_list; 8966 QualType T = Context->getRecordType(VaListDecl); 8967 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8968 } 8969 8970 static TypedefDecl * 8971 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8972 // struct __va_list_tag { 8973 RecordDecl *VaListTagDecl; 8974 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8975 VaListTagDecl->startDefinition(); 8976 8977 const size_t NumFields = 4; 8978 QualType FieldTypes[NumFields]; 8979 const char *FieldNames[NumFields]; 8980 8981 // long __gpr; 8982 FieldTypes[0] = Context->LongTy; 8983 FieldNames[0] = "__gpr"; 8984 8985 // long __fpr; 8986 FieldTypes[1] = Context->LongTy; 8987 FieldNames[1] = "__fpr"; 8988 8989 // void *__overflow_arg_area; 8990 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8991 FieldNames[2] = "__overflow_arg_area"; 8992 8993 // void *__reg_save_area; 8994 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8995 FieldNames[3] = "__reg_save_area"; 8996 8997 // Create fields 8998 for (unsigned i = 0; i < NumFields; ++i) { 8999 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 9000 VaListTagDecl, 9001 SourceLocation(), 9002 SourceLocation(), 9003 &Context->Idents.get(FieldNames[i]), 9004 FieldTypes[i], /*TInfo=*/nullptr, 9005 /*BitWidth=*/nullptr, 9006 /*Mutable=*/false, 9007 ICIS_NoInit); 9008 Field->setAccess(AS_public); 9009 VaListTagDecl->addDecl(Field); 9010 } 9011 VaListTagDecl->completeDefinition(); 9012 Context->VaListTagDecl = VaListTagDecl; 9013 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9014 9015 // }; 9016 9017 // typedef __va_list_tag __builtin_va_list[1]; 9018 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9019 QualType VaListTagArrayType = Context->getConstantArrayType( 9020 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 9021 9022 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9023 } 9024 9025 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 9026 // typedef struct __va_list_tag { 9027 RecordDecl *VaListTagDecl; 9028 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9029 VaListTagDecl->startDefinition(); 9030 9031 const size_t NumFields = 3; 9032 QualType FieldTypes[NumFields]; 9033 const char *FieldNames[NumFields]; 9034 9035 // void *CurrentSavedRegisterArea; 9036 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 9037 FieldNames[0] = "__current_saved_reg_area_pointer"; 9038 9039 // void *SavedRegAreaEnd; 9040 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 9041 FieldNames[1] = "__saved_reg_area_end_pointer"; 9042 9043 // void *OverflowArea; 9044 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9045 FieldNames[2] = "__overflow_area_pointer"; 9046 9047 // Create fields 9048 for (unsigned i = 0; i < NumFields; ++i) { 9049 FieldDecl *Field = FieldDecl::Create( 
9050 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 9051 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 9052 /*TInfo=*/nullptr, 9053 /*BitWidth=*/nullptr, 9054 /*Mutable=*/false, ICIS_NoInit); 9055 Field->setAccess(AS_public); 9056 VaListTagDecl->addDecl(Field); 9057 } 9058 VaListTagDecl->completeDefinition(); 9059 Context->VaListTagDecl = VaListTagDecl; 9060 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9061 9062 // } __va_list_tag; 9063 TypedefDecl *VaListTagTypedefDecl = 9064 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 9065 9066 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 9067 9068 // typedef __va_list_tag __builtin_va_list[1]; 9069 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9070 QualType VaListTagArrayType = Context->getConstantArrayType( 9071 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); 9072 9073 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9074 } 9075 9076 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 9077 TargetInfo::BuiltinVaListKind Kind) { 9078 switch (Kind) { 9079 case TargetInfo::CharPtrBuiltinVaList: 9080 return CreateCharPtrBuiltinVaListDecl(Context); 9081 case TargetInfo::VoidPtrBuiltinVaList: 9082 return CreateVoidPtrBuiltinVaListDecl(Context); 9083 case TargetInfo::AArch64ABIBuiltinVaList: 9084 return CreateAArch64ABIBuiltinVaListDecl(Context); 9085 case TargetInfo::PowerABIBuiltinVaList: 9086 return CreatePowerABIBuiltinVaListDecl(Context); 9087 case TargetInfo::X86_64ABIBuiltinVaList: 9088 return CreateX86_64ABIBuiltinVaListDecl(Context); 9089 case TargetInfo::PNaClABIBuiltinVaList: 9090 return CreatePNaClABIBuiltinVaListDecl(Context); 9091 case TargetInfo::AAPCSABIBuiltinVaList: 9092 return CreateAAPCSABIBuiltinVaListDecl(Context); 9093 case TargetInfo::SystemZBuiltinVaList: 9094 return CreateSystemZBuiltinVaListDecl(Context); 9095 case TargetInfo::HexagonBuiltinVaList: 9096 return CreateHexagonBuiltinVaListDecl(Context); 9097 } 9098 9099 llvm_unreachable("Unhandled __builtin_va_list type kind"); 9100 } 9101 9102 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 9103 if (!BuiltinVaListDecl) { 9104 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 9105 assert(BuiltinVaListDecl->isImplicit()); 9106 } 9107 9108 return BuiltinVaListDecl; 9109 } 9110 9111 Decl *ASTContext::getVaListTagDecl() const { 9112 // Force the creation of VaListTagDecl by building the __builtin_va_list 9113 // declaration. 9114 if (!VaListTagDecl) 9115 (void)getBuiltinVaListDecl(); 9116 9117 return VaListTagDecl; 9118 } 9119 9120 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 9121 if (!BuiltinMSVaListDecl) 9122 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 9123 9124 return BuiltinMSVaListDecl; 9125 } 9126 9127 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 9128 // Allow redecl custom type checking builtin for HLSL. 
9129 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin && 9130 BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID())) 9131 return true; 9132 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 9133 } 9134 9135 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 9136 assert(ObjCConstantStringType.isNull() && 9137 "'NSConstantString' type already set!"); 9138 9139 ObjCConstantStringType = getObjCInterfaceType(Decl); 9140 } 9141 9142 /// Retrieve the template name that corresponds to a non-empty 9143 /// lookup. 9144 TemplateName 9145 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 9146 UnresolvedSetIterator End) const { 9147 unsigned size = End - Begin; 9148 assert(size > 1 && "set is not overloaded!"); 9149 9150 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 9151 size * sizeof(FunctionTemplateDecl*)); 9152 auto *OT = new (memory) OverloadedTemplateStorage(size); 9153 9154 NamedDecl **Storage = OT->getStorage(); 9155 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 9156 NamedDecl *D = *I; 9157 assert(isa<FunctionTemplateDecl>(D) || 9158 isa<UnresolvedUsingValueDecl>(D) || 9159 (isa<UsingShadowDecl>(D) && 9160 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 9161 *Storage++ = D; 9162 } 9163 9164 return TemplateName(OT); 9165 } 9166 9167 /// Retrieve a template name representing an unqualified-id that has been 9168 /// assumed to name a template for ADL purposes. 9169 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 9170 auto *OT = new (*this) AssumedTemplateStorage(Name); 9171 return TemplateName(OT); 9172 } 9173 9174 /// Retrieve the template name that represents a qualified 9175 /// template name such as \c std::vector. 9176 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 9177 bool TemplateKeyword, 9178 TemplateName Template) const { 9179 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9180 9181 // FIXME: Canonicalization? 9182 llvm::FoldingSetNodeID ID; 9183 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9184 9185 void *InsertPos = nullptr; 9186 QualifiedTemplateName *QTN = 9187 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9188 if (!QTN) { 9189 QTN = new (*this, alignof(QualifiedTemplateName)) 9190 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9191 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9192 } 9193 9194 return TemplateName(QTN); 9195 } 9196 9197 /// Retrieve the template name that represents a dependent 9198 /// template name such as \c MetaFun::template apply. 
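/// Dependent template names are uniqued in the DependentTemplateNames
/// folding set; when the nested-name-specifier is not already canonical, the
/// node built here also records the canonical template name it maps to.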
9199 TemplateName 9200 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9201 const IdentifierInfo *Name) const { 9202 assert((!NNS || NNS->isDependent()) && 9203 "Nested name specifier must be dependent"); 9204 9205 llvm::FoldingSetNodeID ID; 9206 DependentTemplateName::Profile(ID, NNS, Name); 9207 9208 void *InsertPos = nullptr; 9209 DependentTemplateName *QTN = 9210 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9211 9212 if (QTN) 9213 return TemplateName(QTN); 9214 9215 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9216 if (CanonNNS == NNS) { 9217 QTN = new (*this, alignof(DependentTemplateName)) 9218 DependentTemplateName(NNS, Name); 9219 } else { 9220 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9221 QTN = new (*this, alignof(DependentTemplateName)) 9222 DependentTemplateName(NNS, Name, Canon); 9223 DependentTemplateName *CheckQTN = 9224 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9225 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9226 (void)CheckQTN; 9227 } 9228 9229 DependentTemplateNames.InsertNode(QTN, InsertPos); 9230 return TemplateName(QTN); 9231 } 9232 9233 /// Retrieve the template name that represents a dependent 9234 /// template name such as \c MetaFun::template operator+. 9235 TemplateName 9236 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9237 OverloadedOperatorKind Operator) const { 9238 assert((!NNS || NNS->isDependent()) && 9239 "Nested name specifier must be dependent"); 9240 9241 llvm::FoldingSetNodeID ID; 9242 DependentTemplateName::Profile(ID, NNS, Operator); 9243 9244 void *InsertPos = nullptr; 9245 DependentTemplateName *QTN 9246 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9247 9248 if (QTN) 9249 return TemplateName(QTN); 9250 9251 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9252 if (CanonNNS == NNS) { 9253 QTN = new (*this, alignof(DependentTemplateName)) 9254 DependentTemplateName(NNS, Operator); 9255 } else { 9256 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9257 QTN = new (*this, alignof(DependentTemplateName)) 9258 DependentTemplateName(NNS, Operator, Canon); 9259 9260 DependentTemplateName *CheckQTN 9261 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9262 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9263 (void)CheckQTN; 9264 } 9265 9266 DependentTemplateNames.InsertNode(QTN, InsertPos); 9267 return TemplateName(QTN); 9268 } 9269 9270 TemplateName ASTContext::getSubstTemplateTemplateParm( 9271 TemplateName Replacement, Decl *AssociatedDecl, unsigned Index, 9272 std::optional<unsigned> PackIndex) const { 9273 llvm::FoldingSetNodeID ID; 9274 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl, 9275 Index, PackIndex); 9276 9277 void *insertPos = nullptr; 9278 SubstTemplateTemplateParmStorage *subst 9279 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9280 9281 if (!subst) { 9282 subst = new (*this) SubstTemplateTemplateParmStorage( 9283 Replacement, AssociatedDecl, Index, PackIndex); 9284 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9285 } 9286 9287 return TemplateName(subst); 9288 } 9289 9290 TemplateName 9291 ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack, 9292 Decl *AssociatedDecl, 9293 unsigned Index, bool Final) const { 9294 auto &Self = const_cast<ASTContext &>(*this); 9295 llvm::FoldingSetNodeID ID; 9296 
SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack,
9297                                                 AssociatedDecl, Index, Final);
9298 
9299   void *InsertPos = nullptr;
9300   SubstTemplateTemplateParmPackStorage *Subst
9301     = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
9302 
9303   if (!Subst) {
9304     Subst = new (*this) SubstTemplateTemplateParmPackStorage(
9305         ArgPack.pack_elements(), AssociatedDecl, Index, Final);
9306     SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
9307   }
9308 
9309   return TemplateName(Subst);
9310 }
9311 
9312 /// getFromTargetType - Given one of the integer types provided by
9313 /// TargetInfo, produce the corresponding type. The unsigned @p Type
9314 /// is actually a value of type @c TargetInfo::IntType.
9315 CanQualType ASTContext::getFromTargetType(unsigned Type) const {
9316   switch (Type) {
9317   case TargetInfo::NoInt: return {};
9318   case TargetInfo::SignedChar: return SignedCharTy;
9319   case TargetInfo::UnsignedChar: return UnsignedCharTy;
9320   case TargetInfo::SignedShort: return ShortTy;
9321   case TargetInfo::UnsignedShort: return UnsignedShortTy;
9322   case TargetInfo::SignedInt: return IntTy;
9323   case TargetInfo::UnsignedInt: return UnsignedIntTy;
9324   case TargetInfo::SignedLong: return LongTy;
9325   case TargetInfo::UnsignedLong: return UnsignedLongTy;
9326   case TargetInfo::SignedLongLong: return LongLongTy;
9327   case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
9328   }
9329 
9330   llvm_unreachable("Unhandled TargetInfo::IntType value");
9331 }
9332 
9333 //===----------------------------------------------------------------------===//
9334 // Type Predicates.
9335 //===----------------------------------------------------------------------===//
9336 
9337 /// getObjCGCAttrKind - Returns the Objective-C garbage collection attribute
9338 /// for the given type: one of GCNone, Weak, or Strong.
9339 ///
9340 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
9341   if (getLangOpts().getGC() == LangOptions::NonGC)
9342     return Qualifiers::GCNone;
9343 
9344   assert(getLangOpts().ObjC);
9345   Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
9346 
9347   // The default behaviour under Objective-C's GC is for ObjC pointers
9348   // (or pointers to them) to be treated as though they were declared
9349   // as __strong.
9350   if (GCAttrs == Qualifiers::GCNone) {
9351     if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
9352       return Qualifiers::Strong;
9353     else if (Ty->isPointerType())
9354       return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType());
9355   } else {
9356     // It's not valid to set GC attributes on anything that isn't a
9357     // pointer.
9358 #ifndef NDEBUG
9359     QualType CT = Ty->getCanonicalTypeInternal();
9360     while (const auto *AT = dyn_cast<ArrayType>(CT))
9361       CT = AT->getElementType();
9362     assert(CT->isAnyPointerType() || CT->isBlockPointerType());
9363 #endif
9364   }
9365   return GCAttrs;
9366 }
9367 
9368 //===----------------------------------------------------------------------===//
9369 // Type Compatibility Testing
9370 //===----------------------------------------------------------------------===//
9371 
9372 /// areCompatVectorTypes - Return true if the two specified vector types are
9373 /// compatible.
9374 static bool areCompatVectorTypes(const VectorType *LHS, 9375 const VectorType *RHS) { 9376 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9377 return LHS->getElementType() == RHS->getElementType() && 9378 LHS->getNumElements() == RHS->getNumElements(); 9379 } 9380 9381 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9382 /// compatible. 9383 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9384 const ConstantMatrixType *RHS) { 9385 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9386 return LHS->getElementType() == RHS->getElementType() && 9387 LHS->getNumRows() == RHS->getNumRows() && 9388 LHS->getNumColumns() == RHS->getNumColumns(); 9389 } 9390 9391 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9392 QualType SecondVec) { 9393 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9394 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9395 9396 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9397 return true; 9398 9399 // Treat Neon vector types and most AltiVec vector types as if they are the 9400 // equivalent GCC vector types. 9401 const auto *First = FirstVec->castAs<VectorType>(); 9402 const auto *Second = SecondVec->castAs<VectorType>(); 9403 if (First->getNumElements() == Second->getNumElements() && 9404 hasSameType(First->getElementType(), Second->getElementType()) && 9405 First->getVectorKind() != VectorType::AltiVecPixel && 9406 First->getVectorKind() != VectorType::AltiVecBool && 9407 Second->getVectorKind() != VectorType::AltiVecPixel && 9408 Second->getVectorKind() != VectorType::AltiVecBool && 9409 First->getVectorKind() != VectorType::SveFixedLengthDataVector && 9410 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 9411 Second->getVectorKind() != VectorType::SveFixedLengthDataVector && 9412 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) 9413 return true; 9414 9415 return false; 9416 } 9417 9418 /// getSVETypeSize - Return SVE vector or predicate register size. 9419 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9420 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); 9421 return Ty->getKind() == BuiltinType::SveBool 9422 ? (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth() 9423 : Context.getLangOpts().VScaleMin * 128; 9424 } 9425 9426 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9427 QualType SecondType) { 9428 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 9429 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 9430 "Expected SVE builtin type and vector type!"); 9431 9432 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9433 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9434 if (const auto *VT = SecondType->getAs<VectorType>()) { 9435 // Predicates have the same representation as uint8 so we also have to 9436 // check the kind to make these types incompatible. 
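        // For illustration (hypothetical user code, assuming
        // -msve-vector-bits=512 so that fixed-length SVE types exist):
        //   typedef svbool_t fixed_bool_t
        //       __attribute__((arm_sve_vector_bits(512)));
        // 'fixed_bool_t' is only treated as compatible with 'svbool_t' below,
        // never with a fixed-length data vector, even though its storage is a
        // vector of 8-bit elements.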
9437 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 9438 return BT->getKind() == BuiltinType::SveBool; 9439 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 9440 return VT->getElementType().getCanonicalType() == 9441 FirstType->getSveEltType(*this); 9442 else if (VT->getVectorKind() == VectorType::GenericVector) 9443 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9444 hasSameType(VT->getElementType(), 9445 getBuiltinVectorTypeInfo(BT).ElementType); 9446 } 9447 } 9448 return false; 9449 }; 9450 9451 return IsValidCast(FirstType, SecondType) || 9452 IsValidCast(SecondType, FirstType); 9453 } 9454 9455 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9456 QualType SecondType) { 9457 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 9458 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 9459 "Expected SVE builtin type and vector type!"); 9460 9461 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9462 const auto *BT = FirstType->getAs<BuiltinType>(); 9463 if (!BT) 9464 return false; 9465 9466 const auto *VecTy = SecondType->getAs<VectorType>(); 9467 if (VecTy && 9468 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || 9469 VecTy->getVectorKind() == VectorType::GenericVector)) { 9470 const LangOptions::LaxVectorConversionKind LVCKind = 9471 getLangOpts().getLaxVectorConversions(); 9472 9473 // Can not convert between sve predicates and sve vectors because of 9474 // different size. 9475 if (BT->getKind() == BuiltinType::SveBool && 9476 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) 9477 return false; 9478 9479 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9480 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9481 // converts to VLAT and VLAT implicitly converts to GNUT." 9482 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9483 // predicates. 9484 if (VecTy->getVectorKind() == VectorType::GenericVector && 9485 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9486 return false; 9487 9488 // If -flax-vector-conversions=all is specified, the types are 9489 // certainly compatible. 9490 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9491 return true; 9492 9493 // If -flax-vector-conversions=integer is specified, the types are 9494 // compatible if the elements are integer types. 9495 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9496 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9497 FirstType->getSveEltType(*this)->isIntegerType(); 9498 } 9499 9500 return false; 9501 }; 9502 9503 return IsLaxCompatible(FirstType, SecondType) || 9504 IsLaxCompatible(SecondType, FirstType); 9505 } 9506 9507 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9508 while (true) { 9509 // __strong id 9510 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9511 if (Attr->getAttrKind() == attr::ObjCOwnership) 9512 return true; 9513 9514 Ty = Attr->getModifiedType(); 9515 9516 // X *__strong (...) 9517 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9518 Ty = Paren->getInnerType(); 9519 9520 // We do not want to look through typedefs, typeof(expr), 9521 // typeof(type), or any other way that the type is somehow 9522 // abstracted. 
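    // For illustration (hypothetical user code):
    //   typedef __strong id StrongID;
    //   StrongID a;    // ownership hidden behind the typedef: not "direct"
    //   __strong id b; // ownership written on the declaration itself: direct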
9523 } else { 9524 return false; 9525 } 9526 } 9527 } 9528 9529 //===----------------------------------------------------------------------===// 9530 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9531 //===----------------------------------------------------------------------===// 9532 9533 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9534 /// inheritance hierarchy of 'rProto'. 9535 bool 9536 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9537 ObjCProtocolDecl *rProto) const { 9538 if (declaresSameEntity(lProto, rProto)) 9539 return true; 9540 for (auto *PI : rProto->protocols()) 9541 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9542 return true; 9543 return false; 9544 } 9545 9546 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9547 /// Class<pr1, ...>. 9548 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9549 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9550 for (auto *lhsProto : lhs->quals()) { 9551 bool match = false; 9552 for (auto *rhsProto : rhs->quals()) { 9553 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9554 match = true; 9555 break; 9556 } 9557 } 9558 if (!match) 9559 return false; 9560 } 9561 return true; 9562 } 9563 9564 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9565 /// ObjCQualifiedIDType. 9566 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9567 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9568 bool compare) { 9569 // Allow id<P..> and an 'id' in all cases. 9570 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9571 return true; 9572 9573 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9574 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9575 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9576 return false; 9577 9578 if (lhs->isObjCQualifiedIdType()) { 9579 if (rhs->qual_empty()) { 9580 // If the RHS is a unqualified interface pointer "NSString*", 9581 // make sure we check the class hierarchy. 9582 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9583 for (auto *I : lhs->quals()) { 9584 // when comparing an id<P> on lhs with a static type on rhs, 9585 // see if static class implements all of id's protocols, directly or 9586 // through its super class and categories. 9587 if (!rhsID->ClassImplementsProtocol(I, true)) 9588 return false; 9589 } 9590 } 9591 // If there are no qualifiers and no interface, we have an 'id'. 9592 return true; 9593 } 9594 // Both the right and left sides have qualifiers. 9595 for (auto *lhsProto : lhs->quals()) { 9596 bool match = false; 9597 9598 // when comparing an id<P> on lhs with a static type on rhs, 9599 // see if static class implements all of id's protocols, directly or 9600 // through its super class and categories. 9601 for (auto *rhsProto : rhs->quals()) { 9602 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9603 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9604 match = true; 9605 break; 9606 } 9607 } 9608 // If the RHS is a qualified interface pointer "NSString<P>*", 9609 // make sure we check the class hierarchy. 9610 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9611 for (auto *I : lhs->quals()) { 9612 // when comparing an id<P> on lhs with a static type on rhs, 9613 // see if static class implements all of id's protocols, directly or 9614 // through its super class and categories. 
9615 if (rhsID->ClassImplementsProtocol(I, true)) { 9616 match = true; 9617 break; 9618 } 9619 } 9620 } 9621 if (!match) 9622 return false; 9623 } 9624 9625 return true; 9626 } 9627 9628 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9629 9630 if (lhs->getInterfaceType()) { 9631 // If both the right and left sides have qualifiers. 9632 for (auto *lhsProto : lhs->quals()) { 9633 bool match = false; 9634 9635 // when comparing an id<P> on rhs with a static type on lhs, 9636 // see if static class implements all of id's protocols, directly or 9637 // through its super class and categories. 9638 // First, lhs protocols in the qualifier list must be found, direct 9639 // or indirect in rhs's qualifier list or it is a mismatch. 9640 for (auto *rhsProto : rhs->quals()) { 9641 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9642 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9643 match = true; 9644 break; 9645 } 9646 } 9647 if (!match) 9648 return false; 9649 } 9650 9651 // Static class's protocols, or its super class or category protocols 9652 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9653 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9654 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9655 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9656 // This is rather dubious but matches gcc's behavior. If lhs has 9657 // no type qualifier and its class has no static protocol(s) 9658 // assume that it is mismatch. 9659 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9660 return false; 9661 for (auto *lhsProto : LHSInheritedProtocols) { 9662 bool match = false; 9663 for (auto *rhsProto : rhs->quals()) { 9664 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9665 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9666 match = true; 9667 break; 9668 } 9669 } 9670 if (!match) 9671 return false; 9672 } 9673 } 9674 return true; 9675 } 9676 return false; 9677 } 9678 9679 /// canAssignObjCInterfaces - Return true if the two interface types are 9680 /// compatible for assignment from RHS to LHS. This handles validation of any 9681 /// protocol qualifiers on the LHS or RHS. 9682 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9683 const ObjCObjectPointerType *RHSOPT) { 9684 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9685 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9686 9687 // If either type represents the built-in 'id' type, return true. 9688 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9689 return true; 9690 9691 // Function object that propagates a successful result or handles 9692 // __kindof types. 9693 auto finish = [&](bool succeeded) -> bool { 9694 if (succeeded) 9695 return true; 9696 9697 if (!RHS->isKindOfType()) 9698 return false; 9699 9700 // Strip off __kindof and protocol qualifiers, then check whether 9701 // we can assign the other way. 9702 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9703 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9704 }; 9705 9706 // Casts from or to id<P> are allowed when the other side has compatible 9707 // protocols. 9708 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9709 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9710 } 9711 9712 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 
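  // For illustration (hypothetical Objective-C code):
  //   Class<NSCopying> c1 = nil;
  //   Class<NSCopying, NSCoding> c2 = nil;
  //   c1 = c2;  // compatible: every protocol named on the LHS is satisfied
  //   c2 = c1;  // not compatible: c1 is not known to conform to NSCoding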
9713 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9714 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9715 } 9716 9717 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9718 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9719 return true; 9720 } 9721 9722 // If we have 2 user-defined types, fall into that path. 9723 if (LHS->getInterface() && RHS->getInterface()) { 9724 return finish(canAssignObjCInterfaces(LHS, RHS)); 9725 } 9726 9727 return false; 9728 } 9729 9730 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9731 /// for providing type-safety for objective-c pointers used to pass/return 9732 /// arguments in block literals. When passed as arguments, passing 'A*' where 9733 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9734 /// not OK. For the return type, the opposite is not OK. 9735 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9736 const ObjCObjectPointerType *LHSOPT, 9737 const ObjCObjectPointerType *RHSOPT, 9738 bool BlockReturnType) { 9739 9740 // Function object that propagates a successful result or handles 9741 // __kindof types. 9742 auto finish = [&](bool succeeded) -> bool { 9743 if (succeeded) 9744 return true; 9745 9746 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9747 if (!Expected->isKindOfType()) 9748 return false; 9749 9750 // Strip off __kindof and protocol qualifiers, then check whether 9751 // we can assign the other way. 9752 return canAssignObjCInterfacesInBlockPointer( 9753 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9754 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9755 BlockReturnType); 9756 }; 9757 9758 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9759 return true; 9760 9761 if (LHSOPT->isObjCBuiltinType()) { 9762 return finish(RHSOPT->isObjCBuiltinType() || 9763 RHSOPT->isObjCQualifiedIdType()); 9764 } 9765 9766 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9767 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9768 // Use for block parameters previous type checking for compatibility. 9769 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9770 // Or corrected type checking as in non-compat mode. 9771 (!BlockReturnType && 9772 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9773 else 9774 return finish(ObjCQualifiedIdTypesAreCompatible( 9775 (BlockReturnType ? LHSOPT : RHSOPT), 9776 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9777 } 9778 9779 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9780 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9781 if (LHS && RHS) { // We have 2 user-defined types. 9782 if (LHS != RHS) { 9783 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9784 return finish(BlockReturnType); 9785 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9786 return finish(!BlockReturnType); 9787 } 9788 else 9789 return true; 9790 } 9791 return false; 9792 } 9793 9794 /// Comparison routine for Objective-C protocols to be used with 9795 /// llvm::array_pod_sort. 9796 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9797 ObjCProtocolDecl * const *rhs) { 9798 return (*lhs)->getName().compare((*rhs)->getName()); 9799 } 9800 9801 /// getIntersectionOfProtocols - This routine finds the intersection of set 9802 /// of protocols inherited from two distinct objective-c pointer objects with 9803 /// the given common base. 
9804 /// It is used to build composite qualifier list of the composite type of 9805 /// the conditional expression involving two objective-c pointer objects. 9806 static 9807 void getIntersectionOfProtocols(ASTContext &Context, 9808 const ObjCInterfaceDecl *CommonBase, 9809 const ObjCObjectPointerType *LHSOPT, 9810 const ObjCObjectPointerType *RHSOPT, 9811 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9812 9813 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9814 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9815 assert(LHS->getInterface() && "LHS must have an interface base"); 9816 assert(RHS->getInterface() && "RHS must have an interface base"); 9817 9818 // Add all of the protocols for the LHS. 9819 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9820 9821 // Start with the protocol qualifiers. 9822 for (auto *proto : LHS->quals()) { 9823 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9824 } 9825 9826 // Also add the protocols associated with the LHS interface. 9827 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9828 9829 // Add all of the protocols for the RHS. 9830 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9831 9832 // Start with the protocol qualifiers. 9833 for (auto *proto : RHS->quals()) { 9834 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9835 } 9836 9837 // Also add the protocols associated with the RHS interface. 9838 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9839 9840 // Compute the intersection of the collected protocol sets. 9841 for (auto *proto : LHSProtocolSet) { 9842 if (RHSProtocolSet.count(proto)) 9843 IntersectionSet.push_back(proto); 9844 } 9845 9846 // Compute the set of protocols that is implied by either the common type or 9847 // the protocols within the intersection. 9848 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9849 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9850 9851 // Remove any implied protocols from the list of inherited protocols. 9852 if (!ImpliedProtocols.empty()) { 9853 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9854 return ImpliedProtocols.contains(proto); 9855 }); 9856 } 9857 9858 // Sort the remaining protocols by name. 9859 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9860 compareObjCProtocolsByName); 9861 } 9862 9863 /// Determine whether the first type is a subtype of the second. 9864 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9865 QualType rhs) { 9866 // Common case: two object pointers. 9867 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9868 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9869 if (lhsOPT && rhsOPT) 9870 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9871 9872 // Two block pointers. 9873 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9874 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9875 if (lhsBlock && rhsBlock) 9876 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9877 9878 // If either is an unqualified 'id' and the other is a block, it's 9879 // acceptable. 9880 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9881 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9882 return true; 9883 9884 return false; 9885 } 9886 9887 // Check that the given Objective-C type argument lists are equivalent. 
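// For illustration (hypothetical Objective-C code), with lightweight generics
// such as
//   @interface MyArray<__covariant T> : NSObject
//   @end
// the corresponding type arguments on the two sides are compared according to
// the declared variance of each type parameter rather than requiring an exact
// match.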
9888 static bool sameObjCTypeArgs(ASTContext &ctx, 9889 const ObjCInterfaceDecl *iface, 9890 ArrayRef<QualType> lhsArgs, 9891 ArrayRef<QualType> rhsArgs, 9892 bool stripKindOf) { 9893 if (lhsArgs.size() != rhsArgs.size()) 9894 return false; 9895 9896 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9897 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9898 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9899 continue; 9900 9901 switch (typeParams->begin()[i]->getVariance()) { 9902 case ObjCTypeParamVariance::Invariant: 9903 if (!stripKindOf || 9904 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9905 rhsArgs[i].stripObjCKindOfType(ctx))) { 9906 return false; 9907 } 9908 break; 9909 9910 case ObjCTypeParamVariance::Covariant: 9911 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9912 return false; 9913 break; 9914 9915 case ObjCTypeParamVariance::Contravariant: 9916 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9917 return false; 9918 break; 9919 } 9920 } 9921 9922 return true; 9923 } 9924 9925 QualType ASTContext::areCommonBaseCompatible( 9926 const ObjCObjectPointerType *Lptr, 9927 const ObjCObjectPointerType *Rptr) { 9928 const ObjCObjectType *LHS = Lptr->getObjectType(); 9929 const ObjCObjectType *RHS = Rptr->getObjectType(); 9930 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 9931 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 9932 9933 if (!LDecl || !RDecl) 9934 return {}; 9935 9936 // When either LHS or RHS is a kindof type, we should return a kindof type. 9937 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 9938 // kindof(A). 9939 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 9940 9941 // Follow the left-hand side up the class hierarchy until we either hit a 9942 // root or find the RHS. Record the ancestors in case we don't find it. 9943 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 9944 LHSAncestors; 9945 while (true) { 9946 // Record this ancestor. We'll need this if the common type isn't in the 9947 // path from the LHS to the root. 9948 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 9949 9950 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 9951 // Get the type arguments. 9952 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 9953 bool anyChanges = false; 9954 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9955 // Both have type arguments, compare them. 9956 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9957 LHS->getTypeArgs(), RHS->getTypeArgs(), 9958 /*stripKindOf=*/true)) 9959 return {}; 9960 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9961 // If only one has type arguments, the result will not have type 9962 // arguments. 9963 LHSTypeArgs = {}; 9964 anyChanges = true; 9965 } 9966 9967 // Compute the intersection of protocols. 9968 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9969 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 9970 Protocols); 9971 if (!Protocols.empty()) 9972 anyChanges = true; 9973 9974 // If anything in the LHS will have changed, build a new result type. 9975 // If we need to return a kindof type but LHS is not a kindof type, we 9976 // build a new result type. 
9977 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 9978 QualType Result = getObjCInterfaceType(LHS->getInterface()); 9979 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 9980 anyKindOf || LHS->isKindOfType()); 9981 return getObjCObjectPointerType(Result); 9982 } 9983 9984 return getObjCObjectPointerType(QualType(LHS, 0)); 9985 } 9986 9987 // Find the superclass. 9988 QualType LHSSuperType = LHS->getSuperClassType(); 9989 if (LHSSuperType.isNull()) 9990 break; 9991 9992 LHS = LHSSuperType->castAs<ObjCObjectType>(); 9993 } 9994 9995 // We didn't find anything by following the LHS to its root; now check 9996 // the RHS against the cached set of ancestors. 9997 while (true) { 9998 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 9999 if (KnownLHS != LHSAncestors.end()) { 10000 LHS = KnownLHS->second; 10001 10002 // Get the type arguments. 10003 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 10004 bool anyChanges = false; 10005 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10006 // Both have type arguments, compare them. 10007 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10008 LHS->getTypeArgs(), RHS->getTypeArgs(), 10009 /*stripKindOf=*/true)) 10010 return {}; 10011 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10012 // If only one has type arguments, the result will not have type 10013 // arguments. 10014 RHSTypeArgs = {}; 10015 anyChanges = true; 10016 } 10017 10018 // Compute the intersection of protocols. 10019 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10020 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 10021 Protocols); 10022 if (!Protocols.empty()) 10023 anyChanges = true; 10024 10025 // If we need to return a kindof type but RHS is not a kindof type, we 10026 // build a new result type. 10027 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 10028 QualType Result = getObjCInterfaceType(RHS->getInterface()); 10029 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 10030 anyKindOf || RHS->isKindOfType()); 10031 return getObjCObjectPointerType(Result); 10032 } 10033 10034 return getObjCObjectPointerType(QualType(RHS, 0)); 10035 } 10036 10037 // Find the superclass of the RHS. 10038 QualType RHSSuperType = RHS->getSuperClassType(); 10039 if (RHSSuperType.isNull()) 10040 break; 10041 10042 RHS = RHSSuperType->castAs<ObjCObjectType>(); 10043 } 10044 10045 return {}; 10046 } 10047 10048 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 10049 const ObjCObjectType *RHS) { 10050 assert(LHS->getInterface() && "LHS is not an interface type"); 10051 assert(RHS->getInterface() && "RHS is not an interface type"); 10052 10053 // Verify that the base decls are compatible: the RHS must be a subclass of 10054 // the LHS. 10055 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 10056 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 10057 if (!IsSuperClass) 10058 return false; 10059 10060 // If the LHS has protocol qualifiers, determine whether all of them are 10061 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 10062 // LHS). 10063 if (LHS->getNumProtocols() > 0) { 10064 // OK if conversion of LHS to SuperClass results in narrowing of types 10065 // ; i.e., SuperClass may implement at least one of the protocols 10066 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 10067 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 
10068 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols; 10069 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); 10070 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's 10071 // qualifiers. 10072 for (auto *RHSPI : RHS->quals()) 10073 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols); 10074 // If there are no protocols associated with RHS, it is not a match. 10075 if (SuperClassInheritedProtocols.empty()) 10076 return false; 10077 10078 for (const auto *LHSProto : LHS->quals()) { 10079 bool SuperImplementsProtocol = false; 10080 for (auto *SuperClassProto : SuperClassInheritedProtocols) 10081 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { 10082 SuperImplementsProtocol = true; 10083 break; 10084 } 10085 if (!SuperImplementsProtocol) 10086 return false; 10087 } 10088 } 10089 10090 // If the LHS is specialized, we may need to check type arguments. 10091 if (LHS->isSpecialized()) { 10092 // Follow the superclass chain until we've matched the LHS class in the 10093 // hierarchy. This substitutes type arguments through. 10094 const ObjCObjectType *RHSSuper = RHS; 10095 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface)) 10096 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>(); 10097 10098 // If the RHS is specialized, compare type arguments. 10099 if (RHSSuper->isSpecialized() && 10100 !sameObjCTypeArgs(*this, LHS->getInterface(), 10101 LHS->getTypeArgs(), RHSSuper->getTypeArgs(), 10102 /*stripKindOf=*/true)) { 10103 return false; 10104 } 10105 } 10106 10107 return true; 10108 } 10109 10110 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { 10111 // get the "pointed to" types 10112 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); 10113 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); 10114 10115 if (!LHSOPT || !RHSOPT) 10116 return false; 10117 10118 return canAssignObjCInterfaces(LHSOPT, RHSOPT) || 10119 canAssignObjCInterfaces(RHSOPT, LHSOPT); 10120 } 10121 10122 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { 10123 return canAssignObjCInterfaces( 10124 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(), 10125 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>()); 10126 } 10127 10128 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, 10129 /// both shall have the identically qualified version of a compatible type. 10130 /// C99 6.2.7p1: Two types have compatible types if their types are the 10131 /// same. See 6.7.[2,3,5] for additional rules.
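/// For illustration (hypothetical C code), these redeclarations are accepted
/// because the types in each pair are compatible:
/// \code
///   extern int a[];
///   extern int a[10];   // array of unknown bound vs. known bound
///   int f();
///   int f(int x);       // unprototyped vs. prototyped; 'int' is unchanged
///                       // by the default argument promotions
/// \endcode
/// By contrast, 'int' and 'long' are always distinct types in C, even on
/// targets where they have the same width.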
10132 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 10133 bool CompareUnqualified) { 10134 if (getLangOpts().CPlusPlus) 10135 return hasSameType(LHS, RHS); 10136 10137 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 10138 } 10139 10140 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 10141 return typesAreCompatible(LHS, RHS); 10142 } 10143 10144 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 10145 return !mergeTypes(LHS, RHS, true).isNull(); 10146 } 10147 10148 /// mergeTransparentUnionType - if T is a transparent union type and a member 10149 /// of T is compatible with SubType, return the merged type, else return 10150 /// QualType() 10151 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 10152 bool OfBlockPointer, 10153 bool Unqualified) { 10154 if (const RecordType *UT = T->getAsUnionType()) { 10155 RecordDecl *UD = UT->getDecl(); 10156 if (UD->hasAttr<TransparentUnionAttr>()) { 10157 for (const auto *I : UD->fields()) { 10158 QualType ET = I->getType().getUnqualifiedType(); 10159 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 10160 if (!MT.isNull()) 10161 return MT; 10162 } 10163 } 10164 } 10165 10166 return {}; 10167 } 10168 10169 /// mergeFunctionParameterTypes - merge two types which appear as function 10170 /// parameter types 10171 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 10172 bool OfBlockPointer, 10173 bool Unqualified) { 10174 // GNU extension: two types are compatible if they appear as a function 10175 // argument, one of the types is a transparent union type and the other 10176 // type is compatible with a union member 10177 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 10178 Unqualified); 10179 if (!lmerge.isNull()) 10180 return lmerge; 10181 10182 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10183 Unqualified); 10184 if (!rmerge.isNull()) 10185 return rmerge; 10186 10187 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10188 } 10189 10190 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10191 bool OfBlockPointer, bool Unqualified, 10192 bool AllowCXX, 10193 bool IsConditionalOperator) { 10194 const auto *lbase = lhs->castAs<FunctionType>(); 10195 const auto *rbase = rhs->castAs<FunctionType>(); 10196 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10197 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10198 bool allLTypes = true; 10199 bool allRTypes = true; 10200 10201 // Check return type 10202 QualType retType; 10203 if (OfBlockPointer) { 10204 QualType RHS = rbase->getReturnType(); 10205 QualType LHS = lbase->getReturnType(); 10206 bool UnqualifiedResult = Unqualified; 10207 if (!UnqualifiedResult) 10208 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10209 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10210 } 10211 else 10212 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10213 Unqualified); 10214 if (retType.isNull()) 10215 return {}; 10216 10217 if (Unqualified) 10218 retType = retType.getUnqualifiedType(); 10219 10220 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10221 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10222 if (Unqualified) { 10223 LRetType = LRetType.getUnqualifiedType(); 10224 RRetType = RRetType.getUnqualifiedType(); 10225 } 10226 10227 if (getCanonicalType(retType) != LRetType) 
10228 allLTypes = false; 10229 if (getCanonicalType(retType) != RRetType) 10230 allRTypes = false; 10231 10232 // FIXME: double check this 10233 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10234 // rbase->getRegParmAttr() != 0 && 10235 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10236 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10237 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10238 10239 // Compatible functions must have compatible calling conventions 10240 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10241 return {}; 10242 10243 // Regparm is part of the calling convention. 10244 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10245 return {}; 10246 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10247 return {}; 10248 10249 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10250 return {}; 10251 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10252 return {}; 10253 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10254 return {}; 10255 10256 // When merging declarations, it's common for supplemental information like 10257 // attributes to only be present in one of the declarations, and we generally 10258 // want type merging to preserve the union of information. So a merged 10259 // function type should be noreturn if it was noreturn in *either* operand 10260 // type. 10261 // 10262 // But for the conditional operator, this is backwards. The result of the 10263 // operator could be either operand, and its type should conservatively 10264 // reflect that. So a function type in a composite type is noreturn only 10265 // if it's noreturn in *both* operand types. 10266 // 10267 // Arguably, noreturn is a kind of subtype, and the conditional operator 10268 // ought to produce the most specific common supertype of its operand types. 10269 // That would differ from this rule in contravariant positions. However, 10270 // neither C nor C++ generally uses this kind of subtype reasoning. Also, 10271 // as a practical matter, it would only affect C code that does abstraction of 10272 // higher-order functions (taking noreturn callbacks!), which is uncommon to 10273 // say the least. So we use the simpler rule. 10274 bool NoReturn = IsConditionalOperator 10275 ? 
lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn() 10276 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10277 if (lbaseInfo.getNoReturn() != NoReturn) 10278 allLTypes = false; 10279 if (rbaseInfo.getNoReturn() != NoReturn) 10280 allRTypes = false; 10281 10282 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10283 10284 if (lproto && rproto) { // two C99 style function prototypes 10285 assert((AllowCXX || 10286 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10287 "C++ shouldn't be here"); 10288 // Compatible functions must have the same number of parameters 10289 if (lproto->getNumParams() != rproto->getNumParams()) 10290 return {}; 10291 10292 // Variadic and non-variadic functions aren't compatible 10293 if (lproto->isVariadic() != rproto->isVariadic()) 10294 return {}; 10295 10296 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10297 return {}; 10298 10299 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10300 bool canUseLeft, canUseRight; 10301 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10302 newParamInfos)) 10303 return {}; 10304 10305 if (!canUseLeft) 10306 allLTypes = false; 10307 if (!canUseRight) 10308 allRTypes = false; 10309 10310 // Check parameter type compatibility 10311 SmallVector<QualType, 10> types; 10312 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10313 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10314 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10315 QualType paramType = mergeFunctionParameterTypes( 10316 lParamType, rParamType, OfBlockPointer, Unqualified); 10317 if (paramType.isNull()) 10318 return {}; 10319 10320 if (Unqualified) 10321 paramType = paramType.getUnqualifiedType(); 10322 10323 types.push_back(paramType); 10324 if (Unqualified) { 10325 lParamType = lParamType.getUnqualifiedType(); 10326 rParamType = rParamType.getUnqualifiedType(); 10327 } 10328 10329 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10330 allLTypes = false; 10331 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10332 allRTypes = false; 10333 } 10334 10335 if (allLTypes) return lhs; 10336 if (allRTypes) return rhs; 10337 10338 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10339 EPI.ExtInfo = einfo; 10340 EPI.ExtParameterInfos = 10341 newParamInfos.empty() ? nullptr : newParamInfos.data(); 10342 return getFunctionType(retType, types, EPI); 10343 } 10344 10345 if (lproto) allRTypes = false; 10346 if (rproto) allLTypes = false; 10347 10348 const FunctionProtoType *proto = lproto ? lproto : rproto; 10349 if (proto) { 10350 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10351 if (proto->isVariadic()) 10352 return {}; 10353 // Check that the types are compatible with the types that 10354 // would result from default argument promotions (C99 6.7.5.3p15). 10355 // The only types actually affected are promotable integer 10356 // types and floats, which would be passed as a different 10357 // type depending on whether the prototype is visible. 10358 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10359 QualType paramTy = proto->getParamType(i); 10360 10361 // Look at the converted type of enum types, since that is the type used 10362 // to pass enum values. 
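      // For illustration (hypothetical C code), this pair cannot merge:
      //   void f();        // no prototype
      //   void f(short s); // 'short' widens to 'int' under the default
      //                    // argument promotions
      // whereas 'void f(int)' would merge fine; an enum parameter is judged
      // by its underlying integer type, which is what the conversion below
      // extracts.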
10363 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10364 paramTy = Enum->getDecl()->getIntegerType(); 10365 if (paramTy.isNull()) 10366 return {}; 10367 } 10368 10369 if (isPromotableIntegerType(paramTy) || 10370 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10371 return {}; 10372 } 10373 10374 if (allLTypes) return lhs; 10375 if (allRTypes) return rhs; 10376 10377 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10378 EPI.ExtInfo = einfo; 10379 return getFunctionType(retType, proto->getParamTypes(), EPI); 10380 } 10381 10382 if (allLTypes) return lhs; 10383 if (allRTypes) return rhs; 10384 return getFunctionNoProtoType(retType, einfo); 10385 } 10386 10387 /// Given that we have an enum type and a non-enum type, try to merge them. 10388 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10389 QualType other, bool isBlockReturnType) { 10390 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10391 // a signed integer type, or an unsigned integer type. 10392 // Compatibility is based on the underlying type, not the promotion 10393 // type. 10394 QualType underlyingType = ET->getDecl()->getIntegerType(); 10395 if (underlyingType.isNull()) 10396 return {}; 10397 if (Context.hasSameType(underlyingType, other)) 10398 return other; 10399 10400 // In block return types, we're more permissive and accept any 10401 // integral type of the same size. 10402 if (isBlockReturnType && other->isIntegerType() && 10403 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10404 return other; 10405 10406 return {}; 10407 } 10408 10409 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, 10410 bool Unqualified, bool BlockReturnType, 10411 bool IsConditionalOperator) { 10412 // For C++ we will not reach this code with reference types (see below), 10413 // for OpenMP variant call overloading we might. 10414 // 10415 // C++ [expr]: If an expression initially has the type "reference to T", the 10416 // type is adjusted to "T" prior to any further analysis, the expression 10417 // designates the object or function denoted by the reference, and the 10418 // expression is an lvalue unless the reference is an rvalue reference and 10419 // the expression is a function call (possibly inside parentheses). 10420 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10421 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10422 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10423 LHS->getTypeClass() == RHS->getTypeClass()) 10424 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10425 OfBlockPointer, Unqualified, BlockReturnType); 10426 if (LHSRefTy || RHSRefTy) 10427 return {}; 10428 10429 if (Unqualified) { 10430 LHS = LHS.getUnqualifiedType(); 10431 RHS = RHS.getUnqualifiedType(); 10432 } 10433 10434 QualType LHSCan = getCanonicalType(LHS), 10435 RHSCan = getCanonicalType(RHS); 10436 10437 // If two types are identical, they are compatible. 10438 if (LHSCan == RHSCan) 10439 return LHS; 10440 10441 // If the qualifiers are different, the types aren't compatible... mostly. 10442 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10443 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10444 if (LQuals != RQuals) { 10445 // If any of these qualifiers are different, we have a type 10446 // mismatch. 
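    // For illustration (hypothetical C code): 'const int' and 'int' do not
    // merge here unless the caller requested an unqualified comparison, in
    // which case both sides were already stripped of qualifiers above.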
10447 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10448 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10449 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10450 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10451 return {}; 10452 10453 // Exactly one GC qualifier difference is allowed: __strong is 10454 // okay if the other type has no GC qualifier but is an Objective 10455 // C object pointer (i.e. implicitly strong by default). We fix 10456 // this by pretending that the unqualified type was actually 10457 // qualified __strong. 10458 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10459 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10460 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10461 10462 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10463 return {}; 10464 10465 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10466 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10467 } 10468 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10469 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10470 } 10471 return {}; 10472 } 10473 10474 // Okay, qualifiers are equal. 10475 10476 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10477 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10478 10479 // We want to consider the two function types to be the same for these 10480 // comparisons, just force one to the other. 10481 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10482 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10483 10484 // Same as above for arrays 10485 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10486 LHSClass = Type::ConstantArray; 10487 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10488 RHSClass = Type::ConstantArray; 10489 10490 // ObjCInterfaces are just specialized ObjCObjects. 10491 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10492 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10493 10494 // Canonicalize ExtVector -> Vector. 10495 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10496 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10497 10498 // If the canonical type classes don't match. 10499 if (LHSClass != RHSClass) { 10500 // Note that we only have special rules for turning block enum 10501 // returns into block int returns, not vice-versa. 10502 if (const auto *ETy = LHS->getAs<EnumType>()) { 10503 return mergeEnumWithInteger(*this, ETy, RHS, false); 10504 } 10505 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10506 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10507 } 10508 // allow block pointer type to match an 'id' type. 10509 if (OfBlockPointer && !BlockReturnType) { 10510 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10511 return LHS; 10512 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10513 return RHS; 10514 } 10515 // Allow __auto_type to match anything; it merges to the type with more 10516 // information. 10517 if (const auto *AT = LHS->getAs<AutoType>()) { 10518 if (!AT->isDeduced() && AT->isGNUAutoType()) 10519 return RHS; 10520 } 10521 if (const auto *AT = RHS->getAs<AutoType>()) { 10522 if (!AT->isDeduced() && AT->isGNUAutoType()) 10523 return LHS; 10524 } 10525 return {}; 10526 } 10527 10528 // The canonical type classes match. 
10529 switch (LHSClass) { 10530 #define TYPE(Class, Base) 10531 #define ABSTRACT_TYPE(Class, Base) 10532 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10533 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10534 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10535 #include "clang/AST/TypeNodes.inc" 10536 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10537 10538 case Type::Auto: 10539 case Type::DeducedTemplateSpecialization: 10540 case Type::LValueReference: 10541 case Type::RValueReference: 10542 case Type::MemberPointer: 10543 llvm_unreachable("C++ should never be in mergeTypes"); 10544 10545 case Type::ObjCInterface: 10546 case Type::IncompleteArray: 10547 case Type::VariableArray: 10548 case Type::FunctionProto: 10549 case Type::ExtVector: 10550 llvm_unreachable("Types are eliminated above"); 10551 10552 case Type::Pointer: 10553 { 10554 // Merge two pointer types, while trying to preserve typedef info 10555 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10556 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10557 if (Unqualified) { 10558 LHSPointee = LHSPointee.getUnqualifiedType(); 10559 RHSPointee = RHSPointee.getUnqualifiedType(); 10560 } 10561 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10562 Unqualified); 10563 if (ResultType.isNull()) 10564 return {}; 10565 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10566 return LHS; 10567 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10568 return RHS; 10569 return getPointerType(ResultType); 10570 } 10571 case Type::BlockPointer: 10572 { 10573 // Merge two block pointer types, while trying to preserve typedef info 10574 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10575 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10576 if (Unqualified) { 10577 LHSPointee = LHSPointee.getUnqualifiedType(); 10578 RHSPointee = RHSPointee.getUnqualifiedType(); 10579 } 10580 if (getLangOpts().OpenCL) { 10581 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10582 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10583 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10584 // 6.12.5) thus the following check is asymmetric. 
10585 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10586 return {}; 10587 LHSPteeQual.removeAddressSpace(); 10588 RHSPteeQual.removeAddressSpace(); 10589 LHSPointee = 10590 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10591 RHSPointee = 10592 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10593 } 10594 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10595 Unqualified); 10596 if (ResultType.isNull()) 10597 return {}; 10598 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10599 return LHS; 10600 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10601 return RHS; 10602 return getBlockPointerType(ResultType); 10603 } 10604 case Type::Atomic: 10605 { 10606 // Merge two pointer types, while trying to preserve typedef info 10607 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10608 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10609 if (Unqualified) { 10610 LHSValue = LHSValue.getUnqualifiedType(); 10611 RHSValue = RHSValue.getUnqualifiedType(); 10612 } 10613 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10614 Unqualified); 10615 if (ResultType.isNull()) 10616 return {}; 10617 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10618 return LHS; 10619 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10620 return RHS; 10621 return getAtomicType(ResultType); 10622 } 10623 case Type::ConstantArray: 10624 { 10625 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10626 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10627 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10628 return {}; 10629 10630 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10631 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10632 if (Unqualified) { 10633 LHSElem = LHSElem.getUnqualifiedType(); 10634 RHSElem = RHSElem.getUnqualifiedType(); 10635 } 10636 10637 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10638 if (ResultType.isNull()) 10639 return {}; 10640 10641 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10642 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10643 10644 // If either side is a variable array, and both are complete, check whether 10645 // the current dimension is definite. 
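    // For illustration (hypothetical C code), merging the pointee types of
    //   int (*p)[10];
    //   int (*q)[n];   // VLA bound that is not an integer constant expression
    // succeeds and keeps the constant bound, while two definite but unequal
    // bounds (say [10] and [20]) were already rejected above.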
10646 if (LVAT || RVAT) { 10647 auto SizeFetch = [this](const VariableArrayType* VAT, 10648 const ConstantArrayType* CAT) 10649 -> std::pair<bool,llvm::APInt> { 10650 if (VAT) { 10651 std::optional<llvm::APSInt> TheInt; 10652 Expr *E = VAT->getSizeExpr(); 10653 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10654 return std::make_pair(true, *TheInt); 10655 return std::make_pair(false, llvm::APSInt()); 10656 } 10657 if (CAT) 10658 return std::make_pair(true, CAT->getSize()); 10659 return std::make_pair(false, llvm::APInt()); 10660 }; 10661 10662 bool HaveLSize, HaveRSize; 10663 llvm::APInt LSize, RSize; 10664 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10665 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10666 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10667 return {}; // Definite, but unequal, array dimension 10668 } 10669 10670 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10671 return LHS; 10672 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10673 return RHS; 10674 if (LCAT) 10675 return getConstantArrayType(ResultType, LCAT->getSize(), 10676 LCAT->getSizeExpr(), 10677 ArrayType::ArraySizeModifier(), 0); 10678 if (RCAT) 10679 return getConstantArrayType(ResultType, RCAT->getSize(), 10680 RCAT->getSizeExpr(), 10681 ArrayType::ArraySizeModifier(), 0); 10682 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10683 return LHS; 10684 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10685 return RHS; 10686 if (LVAT) { 10687 // FIXME: This isn't correct! But tricky to implement because 10688 // the array's size has to be the size of LHS, but the type 10689 // has to be different. 10690 return LHS; 10691 } 10692 if (RVAT) { 10693 // FIXME: This isn't correct! But tricky to implement because 10694 // the array's size has to be the size of RHS, but the type 10695 // has to be different. 10696 return RHS; 10697 } 10698 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10699 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10700 return getIncompleteArrayType(ResultType, 10701 ArrayType::ArraySizeModifier(), 0); 10702 } 10703 case Type::FunctionNoProto: 10704 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 10705 /*AllowCXX=*/false, IsConditionalOperator); 10706 case Type::Record: 10707 case Type::Enum: 10708 return {}; 10709 case Type::Builtin: 10710 // Only exactly equal builtin types are compatible, which is tested above. 10711 return {}; 10712 case Type::Complex: 10713 // Distinct complex types are incompatible. 10714 return {}; 10715 case Type::Vector: 10716 // FIXME: The merged type should be an ExtVector! 10717 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10718 RHSCan->castAs<VectorType>())) 10719 return LHS; 10720 return {}; 10721 case Type::ConstantMatrix: 10722 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10723 RHSCan->castAs<ConstantMatrixType>())) 10724 return LHS; 10725 return {}; 10726 case Type::ObjCObject: { 10727 // Check if the types are assignment compatible. 10728 // FIXME: This should be type compatibility, e.g. whether 10729 // "LHS x; RHS x;" at global scope is legal. 
10730 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10731 RHS->castAs<ObjCObjectType>())) 10732 return LHS; 10733 return {}; 10734 } 10735 case Type::ObjCObjectPointer: 10736 if (OfBlockPointer) { 10737 if (canAssignObjCInterfacesInBlockPointer( 10738 LHS->castAs<ObjCObjectPointerType>(), 10739 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10740 return LHS; 10741 return {}; 10742 } 10743 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10744 RHS->castAs<ObjCObjectPointerType>())) 10745 return LHS; 10746 return {}; 10747 case Type::Pipe: 10748 assert(LHS != RHS && 10749 "Equivalent pipe types should have already been handled!"); 10750 return {}; 10751 case Type::BitInt: { 10752 // Merge two bit-precise int types, while trying to preserve typedef info. 10753 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10754 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10755 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10756 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10757 10758 // Like unsigned/int, shouldn't have a type if they don't match. 10759 if (LHSUnsigned != RHSUnsigned) 10760 return {}; 10761 10762 if (LHSBits != RHSBits) 10763 return {}; 10764 return LHS; 10765 } 10766 } 10767 10768 llvm_unreachable("Invalid Type::Class!"); 10769 } 10770 10771 bool ASTContext::mergeExtParameterInfo( 10772 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10773 bool &CanUseFirst, bool &CanUseSecond, 10774 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10775 assert(NewParamInfos.empty() && "param info list not empty"); 10776 CanUseFirst = CanUseSecond = true; 10777 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10778 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10779 10780 // Fast path: if the first type doesn't have ext parameter infos, 10781 // we match if and only if the second type also doesn't have them. 10782 if (!FirstHasInfo && !SecondHasInfo) 10783 return true; 10784 10785 bool NeedParamInfo = false; 10786 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10787 : SecondFnType->getExtParameterInfos().size(); 10788 10789 for (size_t I = 0; I < E; ++I) { 10790 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10791 if (FirstHasInfo) 10792 FirstParam = FirstFnType->getExtParameterInfo(I); 10793 if (SecondHasInfo) 10794 SecondParam = SecondFnType->getExtParameterInfo(I); 10795 10796 // Cannot merge unless everything except the noescape flag matches. 10797 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10798 return false; 10799 10800 bool FirstNoEscape = FirstParam.isNoEscape(); 10801 bool SecondNoEscape = SecondParam.isNoEscape(); 10802 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10803 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10804 if (NewParamInfos.back().getOpaqueValue()) 10805 NeedParamInfo = true; 10806 if (FirstNoEscape != IsNoEscape) 10807 CanUseFirst = false; 10808 if (SecondNoEscape != IsNoEscape) 10809 CanUseSecond = false; 10810 } 10811 10812 if (!NeedParamInfo) 10813 NewParamInfos.clear(); 10814 10815 return true; 10816 } 10817 10818 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10819 ObjCLayouts[CD] = nullptr; 10820 } 10821 10822 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10823 /// 'RHS' attributes and returns the merged version; including for function 10824 /// return types. 
10825 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10826 QualType LHSCan = getCanonicalType(LHS), 10827 RHSCan = getCanonicalType(RHS); 10828 // If two types are identical, they are compatible. 10829 if (LHSCan == RHSCan) 10830 return LHS; 10831 if (RHSCan->isFunctionType()) { 10832 if (!LHSCan->isFunctionType()) 10833 return {}; 10834 QualType OldReturnType = 10835 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10836 QualType NewReturnType = 10837 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10838 QualType ResReturnType = 10839 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10840 if (ResReturnType.isNull()) 10841 return {}; 10842 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10843 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10844 // In either case, use OldReturnType to build the new function type. 10845 const auto *F = LHS->castAs<FunctionType>(); 10846 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10847 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10848 EPI.ExtInfo = getFunctionExtInfo(LHS); 10849 QualType ResultType = 10850 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10851 return ResultType; 10852 } 10853 } 10854 return {}; 10855 } 10856 10857 // If the qualifiers are different, the types can still be merged. 10858 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10859 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10860 if (LQuals != RQuals) { 10861 // If any of these qualifiers are different, we have a type mismatch. 10862 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10863 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10864 return {}; 10865 10866 // Exactly one GC qualifier difference is allowed: __strong is 10867 // okay if the other type has no GC qualifier but is an Objective 10868 // C object pointer (i.e. implicitly strong by default). We fix 10869 // this by pretending that the unqualified type was actually 10870 // qualified __strong. 
10871 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10872 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10873 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10874 10875 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10876 return {}; 10877 10878 if (GC_L == Qualifiers::Strong) 10879 return LHS; 10880 if (GC_R == Qualifiers::Strong) 10881 return RHS; 10882 return {}; 10883 } 10884 10885 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10886 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10887 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10888 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10889 if (ResQT == LHSBaseQT) 10890 return LHS; 10891 if (ResQT == RHSBaseQT) 10892 return RHS; 10893 } 10894 return {}; 10895 } 10896 10897 //===----------------------------------------------------------------------===// 10898 // Integer Predicates 10899 //===----------------------------------------------------------------------===// 10900 10901 unsigned ASTContext::getIntWidth(QualType T) const { 10902 if (const auto *ET = T->getAs<EnumType>()) 10903 T = ET->getDecl()->getIntegerType(); 10904 if (T->isBooleanType()) 10905 return 1; 10906 if (const auto *EIT = T->getAs<BitIntType>()) 10907 return EIT->getNumBits(); 10908 // For builtin types, just use the standard type sizing method 10909 return (unsigned)getTypeSize(T); 10910 } 10911 10912 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 10913 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 10914 T->isFixedPointType()) && 10915 "Unexpected type"); 10916 10917 // Turn <4 x signed int> -> <4 x unsigned int> 10918 if (const auto *VTy = T->getAs<VectorType>()) 10919 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 10920 VTy->getNumElements(), VTy->getVectorKind()); 10921 10922 // For _BitInt, return an unsigned _BitInt with same width. 10923 if (const auto *EITy = T->getAs<BitIntType>()) 10924 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 10925 10926 // For enums, get the underlying integer type of the enum, and let the general 10927 // integer type signchanging code handle it. 10928 if (const auto *ETy = T->getAs<EnumType>()) 10929 T = ETy->getDecl()->getIntegerType(); 10930 10931 switch (T->castAs<BuiltinType>()->getKind()) { 10932 case BuiltinType::Char_U: 10933 // Plain `char` is mapped to `unsigned char` even if it's already unsigned 10934 case BuiltinType::Char_S: 10935 case BuiltinType::SChar: 10936 case BuiltinType::Char8: 10937 return UnsignedCharTy; 10938 case BuiltinType::Short: 10939 return UnsignedShortTy; 10940 case BuiltinType::Int: 10941 return UnsignedIntTy; 10942 case BuiltinType::Long: 10943 return UnsignedLongTy; 10944 case BuiltinType::LongLong: 10945 return UnsignedLongLongTy; 10946 case BuiltinType::Int128: 10947 return UnsignedInt128Ty; 10948 // wchar_t is special. It is either signed or not, but when it's signed, 10949 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 10950 // version of its underlying type instead. 
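  // Illustrative example (assumes a target whose wchar_t underlying type is
  // 'int', e.g. typical 64-bit Linux): the unsigned counterpart produced
  // here is 'unsigned int', since no 'unsigned wchar_t' type exists.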
10951 case BuiltinType::WChar_S: 10952 return getUnsignedWCharType(); 10953 10954 case BuiltinType::ShortAccum: 10955 return UnsignedShortAccumTy; 10956 case BuiltinType::Accum: 10957 return UnsignedAccumTy; 10958 case BuiltinType::LongAccum: 10959 return UnsignedLongAccumTy; 10960 case BuiltinType::SatShortAccum: 10961 return SatUnsignedShortAccumTy; 10962 case BuiltinType::SatAccum: 10963 return SatUnsignedAccumTy; 10964 case BuiltinType::SatLongAccum: 10965 return SatUnsignedLongAccumTy; 10966 case BuiltinType::ShortFract: 10967 return UnsignedShortFractTy; 10968 case BuiltinType::Fract: 10969 return UnsignedFractTy; 10970 case BuiltinType::LongFract: 10971 return UnsignedLongFractTy; 10972 case BuiltinType::SatShortFract: 10973 return SatUnsignedShortFractTy; 10974 case BuiltinType::SatFract: 10975 return SatUnsignedFractTy; 10976 case BuiltinType::SatLongFract: 10977 return SatUnsignedLongFractTy; 10978 default: 10979 assert((T->hasUnsignedIntegerRepresentation() || 10980 T->isUnsignedFixedPointType()) && 10981 "Unexpected signed integer or fixed point type"); 10982 return T; 10983 } 10984 } 10985 10986 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 10987 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 10988 T->isFixedPointType()) && 10989 "Unexpected type"); 10990 10991 // Turn <4 x unsigned int> -> <4 x signed int> 10992 if (const auto *VTy = T->getAs<VectorType>()) 10993 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 10994 VTy->getNumElements(), VTy->getVectorKind()); 10995 10996 // For _BitInt, return a signed _BitInt with same width. 10997 if (const auto *EITy = T->getAs<BitIntType>()) 10998 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 10999 11000 // For enums, get the underlying integer type of the enum, and let the general 11001 // integer type signchanging code handle it. 11002 if (const auto *ETy = T->getAs<EnumType>()) 11003 T = ETy->getDecl()->getIntegerType(); 11004 11005 switch (T->castAs<BuiltinType>()->getKind()) { 11006 case BuiltinType::Char_S: 11007 // Plain `char` is mapped to `signed char` even if it's already signed 11008 case BuiltinType::Char_U: 11009 case BuiltinType::UChar: 11010 case BuiltinType::Char8: 11011 return SignedCharTy; 11012 case BuiltinType::UShort: 11013 return ShortTy; 11014 case BuiltinType::UInt: 11015 return IntTy; 11016 case BuiltinType::ULong: 11017 return LongTy; 11018 case BuiltinType::ULongLong: 11019 return LongLongTy; 11020 case BuiltinType::UInt128: 11021 return Int128Ty; 11022 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 11023 // there's no matching "signed wchar_t". Therefore we return the signed 11024 // version of its underlying type instead. 
11025 case BuiltinType::WChar_U: 11026 return getSignedWCharType(); 11027 11028 case BuiltinType::UShortAccum: 11029 return ShortAccumTy; 11030 case BuiltinType::UAccum: 11031 return AccumTy; 11032 case BuiltinType::ULongAccum: 11033 return LongAccumTy; 11034 case BuiltinType::SatUShortAccum: 11035 return SatShortAccumTy; 11036 case BuiltinType::SatUAccum: 11037 return SatAccumTy; 11038 case BuiltinType::SatULongAccum: 11039 return SatLongAccumTy; 11040 case BuiltinType::UShortFract: 11041 return ShortFractTy; 11042 case BuiltinType::UFract: 11043 return FractTy; 11044 case BuiltinType::ULongFract: 11045 return LongFractTy; 11046 case BuiltinType::SatUShortFract: 11047 return SatShortFractTy; 11048 case BuiltinType::SatUFract: 11049 return SatFractTy; 11050 case BuiltinType::SatULongFract: 11051 return SatLongFractTy; 11052 default: 11053 assert( 11054 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 11055 "Unexpected signed integer or fixed point type"); 11056 return T; 11057 } 11058 } 11059 11060 ASTMutationListener::~ASTMutationListener() = default; 11061 11062 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 11063 QualType ReturnType) {} 11064 11065 //===----------------------------------------------------------------------===// 11066 // Builtin Type Computation 11067 //===----------------------------------------------------------------------===// 11068 11069 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 11070 /// pointer over the consumed characters. This returns the resultant type. If 11071 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 11072 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 11073 /// a vector of "i*". 11074 /// 11075 /// RequiresICE is filled in on return to indicate whether the value is required 11076 /// to be an Integer Constant Expression. 11077 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 11078 ASTContext::GetBuiltinTypeError &Error, 11079 bool &RequiresICE, 11080 bool AllowTypeModifiers) { 11081 // Modifiers. 11082 int HowLong = 0; 11083 bool Signed = false, Unsigned = false; 11084 RequiresICE = false; 11085 11086 // Read the prefixed modifiers first. 11087 bool Done = false; 11088 #ifndef NDEBUG 11089 bool IsSpecial = false; 11090 #endif 11091 while (!Done) { 11092 switch (*Str++) { 11093 default: Done = true; --Str; break; 11094 case 'I': 11095 RequiresICE = true; 11096 break; 11097 case 'S': 11098 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 11099 assert(!Signed && "Can't use 'S' modifier multiple times!"); 11100 Signed = true; 11101 break; 11102 case 'U': 11103 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 11104 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 11105 Unsigned = true; 11106 break; 11107 case 'L': 11108 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 11109 assert(HowLong <= 2 && "Can't have LLLL modifier"); 11110 ++HowLong; 11111 break; 11112 case 'N': 11113 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 11114 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11115 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 11116 #ifndef NDEBUG 11117 IsSpecial = true; 11118 #endif 11119 if (Context.getTargetInfo().getLongWidth() == 32) 11120 ++HowLong; 11121 break; 11122 case 'W': 11123 // This modifier represents int64 type. 
11124 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11125 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 11126 #ifndef NDEBUG 11127 IsSpecial = true; 11128 #endif 11129 switch (Context.getTargetInfo().getInt64Type()) { 11130 default: 11131 llvm_unreachable("Unexpected integer type"); 11132 case TargetInfo::SignedLong: 11133 HowLong = 1; 11134 break; 11135 case TargetInfo::SignedLongLong: 11136 HowLong = 2; 11137 break; 11138 } 11139 break; 11140 case 'Z': 11141 // This modifier represents int32 type. 11142 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11143 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 11144 #ifndef NDEBUG 11145 IsSpecial = true; 11146 #endif 11147 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 11148 default: 11149 llvm_unreachable("Unexpected integer type"); 11150 case TargetInfo::SignedInt: 11151 HowLong = 0; 11152 break; 11153 case TargetInfo::SignedLong: 11154 HowLong = 1; 11155 break; 11156 case TargetInfo::SignedLongLong: 11157 HowLong = 2; 11158 break; 11159 } 11160 break; 11161 case 'O': 11162 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11163 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 11164 #ifndef NDEBUG 11165 IsSpecial = true; 11166 #endif 11167 if (Context.getLangOpts().OpenCL) 11168 HowLong = 1; 11169 else 11170 HowLong = 2; 11171 break; 11172 } 11173 } 11174 11175 QualType Type; 11176 11177 // Read the base type. 11178 switch (*Str++) { 11179 default: llvm_unreachable("Unknown builtin type letter!"); 11180 case 'x': 11181 assert(HowLong == 0 && !Signed && !Unsigned && 11182 "Bad modifiers used with 'x'!"); 11183 Type = Context.Float16Ty; 11184 break; 11185 case 'y': 11186 assert(HowLong == 0 && !Signed && !Unsigned && 11187 "Bad modifiers used with 'y'!"); 11188 Type = Context.BFloat16Ty; 11189 break; 11190 case 'v': 11191 assert(HowLong == 0 && !Signed && !Unsigned && 11192 "Bad modifiers used with 'v'!"); 11193 Type = Context.VoidTy; 11194 break; 11195 case 'h': 11196 assert(HowLong == 0 && !Signed && !Unsigned && 11197 "Bad modifiers used with 'h'!"); 11198 Type = Context.HalfTy; 11199 break; 11200 case 'f': 11201 assert(HowLong == 0 && !Signed && !Unsigned && 11202 "Bad modifiers used with 'f'!"); 11203 Type = Context.FloatTy; 11204 break; 11205 case 'd': 11206 assert(HowLong < 3 && !Signed && !Unsigned && 11207 "Bad modifiers used with 'd'!"); 11208 if (HowLong == 1) 11209 Type = Context.LongDoubleTy; 11210 else if (HowLong == 2) 11211 Type = Context.Float128Ty; 11212 else 11213 Type = Context.DoubleTy; 11214 break; 11215 case 's': 11216 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11217 if (Unsigned) 11218 Type = Context.UnsignedShortTy; 11219 else 11220 Type = Context.ShortTy; 11221 break; 11222 case 'i': 11223 if (HowLong == 3) 11224 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11225 else if (HowLong == 2) 11226 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11227 else if (HowLong == 1) 11228 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11229 else 11230 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11231 break; 11232 case 'c': 11233 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11234 if (Signed) 11235 Type = Context.SignedCharTy; 11236 else if (Unsigned) 11237 Type = Context.UnsignedCharTy; 11238 else 11239 Type = Context.CharTy; 11240 break; 11241 case 'b': // boolean 11242 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11243 Type = Context.BoolTy; 11244 break; 11245 case 'z': // size_t. 11246 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11247 Type = Context.getSizeType(); 11248 break; 11249 case 'w': // wchar_t. 11250 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11251 Type = Context.getWideCharType(); 11252 break; 11253 case 'F': 11254 Type = Context.getCFConstantStringType(); 11255 break; 11256 case 'G': 11257 Type = Context.getObjCIdType(); 11258 break; 11259 case 'H': 11260 Type = Context.getObjCSelType(); 11261 break; 11262 case 'M': 11263 Type = Context.getObjCSuperType(); 11264 break; 11265 case 'a': 11266 Type = Context.getBuiltinVaListType(); 11267 assert(!Type.isNull() && "builtin va list type not initialized!"); 11268 break; 11269 case 'A': 11270 // This is a "reference" to a va_list; however, what exactly 11271 // this means depends on how va_list is defined. There are two 11272 // different kinds of va_list: ones passed by value, and ones 11273 // passed by reference. An example of a by-value va_list is 11274 // x86, where va_list is a char*. An example of by-ref va_list 11275 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11276 // we want this argument to be a char*&; for x86-64, we want 11277 // it to be a __va_list_tag*. 11278 Type = Context.getBuiltinVaListType(); 11279 assert(!Type.isNull() && "builtin va list type not initialized!"); 11280 if (Type->isArrayType()) 11281 Type = Context.getArrayDecayedType(Type); 11282 else 11283 Type = Context.getLValueReferenceType(Type); 11284 break; 11285 case 'q': { 11286 char *End; 11287 unsigned NumElements = strtoul(Str, &End, 10); 11288 assert(End != Str && "Missing vector size"); 11289 Str = End; 11290 11291 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11292 RequiresICE, false); 11293 assert(!RequiresICE && "Can't require vector ICE"); 11294 11295 Type = Context.getScalableVectorType(ElementType, NumElements); 11296 break; 11297 } 11298 case 'V': { 11299 char *End; 11300 unsigned NumElements = strtoul(Str, &End, 10); 11301 assert(End != Str && "Missing vector size"); 11302 Str = End; 11303 11304 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11305 RequiresICE, false); 11306 assert(!RequiresICE && "Can't require vector ICE"); 11307 11308 // TODO: No way to make AltiVec vectors in builtins yet. 
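    // Worked example (descriptor syntax only; the concrete string is an
    // assumption for illustration, not quoted from Builtins*.def): for
    // "V4f", NumElements parses as 4 and the recursive DecodeTypeFromStr
    // call yields 'float', so the result is a 4 x float generic vector.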
11309 Type = Context.getVectorType(ElementType, NumElements, 11310 VectorType::GenericVector); 11311 break; 11312 } 11313 case 'E': { 11314 char *End; 11315 11316 unsigned NumElements = strtoul(Str, &End, 10); 11317 assert(End != Str && "Missing vector size"); 11318 11319 Str = End; 11320 11321 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11322 false); 11323 Type = Context.getExtVectorType(ElementType, NumElements); 11324 break; 11325 } 11326 case 'X': { 11327 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11328 false); 11329 assert(!RequiresICE && "Can't require complex ICE"); 11330 Type = Context.getComplexType(ElementType); 11331 break; 11332 } 11333 case 'Y': 11334 Type = Context.getPointerDiffType(); 11335 break; 11336 case 'P': 11337 Type = Context.getFILEType(); 11338 if (Type.isNull()) { 11339 Error = ASTContext::GE_Missing_stdio; 11340 return {}; 11341 } 11342 break; 11343 case 'J': 11344 if (Signed) 11345 Type = Context.getsigjmp_bufType(); 11346 else 11347 Type = Context.getjmp_bufType(); 11348 11349 if (Type.isNull()) { 11350 Error = ASTContext::GE_Missing_setjmp; 11351 return {}; 11352 } 11353 break; 11354 case 'K': 11355 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11356 Type = Context.getucontext_tType(); 11357 11358 if (Type.isNull()) { 11359 Error = ASTContext::GE_Missing_ucontext; 11360 return {}; 11361 } 11362 break; 11363 case 'p': 11364 Type = Context.getProcessIDType(); 11365 break; 11366 } 11367 11368 // If there are modifiers and if we're allowed to parse them, go for it. 11369 Done = !AllowTypeModifiers; 11370 while (!Done) { 11371 switch (char c = *Str++) { 11372 default: Done = true; --Str; break; 11373 case '*': 11374 case '&': { 11375 // Both pointers and references can have their pointee types 11376 // qualified with an address space. 11377 char *End; 11378 unsigned AddrSpace = strtoul(Str, &End, 10); 11379 if (End != Str) { 11380 // Note AddrSpace == 0 is not the same as an unspecified address space. 11381 Type = Context.getAddrSpaceQualType( 11382 Type, 11383 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11384 Str = End; 11385 } 11386 if (c == '*') 11387 Type = Context.getPointerType(Type); 11388 else 11389 Type = Context.getLValueReferenceType(Type); 11390 break; 11391 } 11392 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11393 case 'C': 11394 Type = Type.withConst(); 11395 break; 11396 case 'D': 11397 Type = Context.getVolatileType(Type); 11398 break; 11399 case 'R': 11400 Type = Type.withRestrict(); 11401 break; 11402 } 11403 } 11404 11405 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11406 "Integer constant 'I' type must be an integer"); 11407 11408 return Type; 11409 } 11410 11411 // On some targets such as PowerPC, some of the builtins are defined with custom 11412 // type descriptors for target-dependent types. These descriptors are decoded in 11413 // other functions, but it may be useful to be able to fall back to default 11414 // descriptor decoding to define builtins mixing target-dependent and target- 11415 // independent types. This function allows decoding one type descriptor with 11416 // default decoding. 
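// Decoding sketch (the example descriptor is an assumption chosen for
// illustration): a type string such as "zcC*" is consumed left to right as
//
//   'z'          -> size_t         (return type)
//   'c' 'C' '*'  -> const char *   (first parameter)
//
// which is how GetBuiltinType() below assembles a builtin's function type.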
11417 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11418 GetBuiltinTypeError &Error, bool &RequireICE, 11419 bool AllowTypeModifiers) const { 11420 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11421 } 11422 11423 /// GetBuiltinType - Return the type for the specified builtin. 11424 QualType ASTContext::GetBuiltinType(unsigned Id, 11425 GetBuiltinTypeError &Error, 11426 unsigned *IntegerConstantArgs) const { 11427 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11428 if (TypeStr[0] == '\0') { 11429 Error = GE_Missing_type; 11430 return {}; 11431 } 11432 11433 SmallVector<QualType, 8> ArgTypes; 11434 11435 bool RequiresICE = false; 11436 Error = GE_None; 11437 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11438 RequiresICE, true); 11439 if (Error != GE_None) 11440 return {}; 11441 11442 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11443 11444 while (TypeStr[0] && TypeStr[0] != '.') { 11445 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11446 if (Error != GE_None) 11447 return {}; 11448 11449 // If this argument is required to be an IntegerConstantExpression and the 11450 // caller cares, fill in the bitmask we return. 11451 if (RequiresICE && IntegerConstantArgs) 11452 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11453 11454 // Do array -> pointer decay. The builtin should use the decayed type. 11455 if (Ty->isArrayType()) 11456 Ty = getArrayDecayedType(Ty); 11457 11458 ArgTypes.push_back(Ty); 11459 } 11460 11461 if (Id == Builtin::BI__GetExceptionInfo) 11462 return {}; 11463 11464 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11465 "'.' should only occur at end of builtin type list!"); 11466 11467 bool Variadic = (TypeStr[0] == '.'); 11468 11469 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11470 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11471 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11472 11473 11474 // We really shouldn't be making a no-proto type here. 11475 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11476 return getFunctionNoProtoType(ResType, EI); 11477 11478 FunctionProtoType::ExtProtoInfo EPI; 11479 EPI.ExtInfo = EI; 11480 EPI.Variadic = Variadic; 11481 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11482 EPI.ExceptionSpec.Type = 11483 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11484 11485 return getFunctionType(ResType, ArgTypes, EPI); 11486 } 11487 11488 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11489 const FunctionDecl *FD) { 11490 if (!FD->isExternallyVisible()) 11491 return GVA_Internal; 11492 11493 // Non-user-provided functions get emitted as weak definitions with every 11494 // use, no matter whether they've been explicitly instantiated etc. 
11495 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) 11496 if (!MD->isUserProvided()) 11497 return GVA_DiscardableODR; 11498 11499 GVALinkage External; 11500 switch (FD->getTemplateSpecializationKind()) { 11501 case TSK_Undeclared: 11502 case TSK_ExplicitSpecialization: 11503 External = GVA_StrongExternal; 11504 break; 11505 11506 case TSK_ExplicitInstantiationDefinition: 11507 return GVA_StrongODR; 11508 11509 // C++11 [temp.explicit]p10: 11510 // [ Note: The intent is that an inline function that is the subject of 11511 // an explicit instantiation declaration will still be implicitly 11512 // instantiated when used so that the body can be considered for 11513 // inlining, but that no out-of-line copy of the inline function would be 11514 // generated in the translation unit. -- end note ] 11515 case TSK_ExplicitInstantiationDeclaration: 11516 return GVA_AvailableExternally; 11517 11518 case TSK_ImplicitInstantiation: 11519 External = GVA_DiscardableODR; 11520 break; 11521 } 11522 11523 if (!FD->isInlined()) 11524 return External; 11525 11526 if ((!Context.getLangOpts().CPlusPlus && 11527 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11528 !FD->hasAttr<DLLExportAttr>()) || 11529 FD->hasAttr<GNUInlineAttr>()) { 11530 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11531 11532 // GNU or C99 inline semantics. Determine whether this symbol should be 11533 // externally visible. 11534 if (FD->isInlineDefinitionExternallyVisible()) 11535 return External; 11536 11537 // C99 inline semantics, where the symbol is not externally visible. 11538 return GVA_AvailableExternally; 11539 } 11540 11541 // Functions specified with extern and inline in -fms-compatibility mode 11542 // forcibly get emitted. While the body of the function cannot be later 11543 // replaced, the function definition cannot be discarded. 11544 if (FD->isMSExternInline()) 11545 return GVA_StrongODR; 11546 11547 return GVA_DiscardableODR; 11548 } 11549 11550 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11551 const Decl *D, GVALinkage L) { 11552 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11553 // dllexport/dllimport on inline functions. 11554 if (D->hasAttr<DLLImportAttr>()) { 11555 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11556 return GVA_AvailableExternally; 11557 } else if (D->hasAttr<DLLExportAttr>()) { 11558 if (L == GVA_DiscardableODR) 11559 return GVA_StrongODR; 11560 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11561 // Device-side functions with __global__ attribute must always be 11562 // visible externally so they can be launched from host. 11563 if (D->hasAttr<CUDAGlobalAttr>() && 11564 (L == GVA_DiscardableODR || L == GVA_Internal)) 11565 return GVA_StrongODR; 11566 // Single source offloading languages like CUDA/HIP need to be able to 11567 // access static device variables from host code of the same compilation 11568 // unit. This is done by externalizing the static variable with a shared 11569 // name between the host and device compilation which is the same for the 11570 // same compilation unit whereas different among different compilation 11571 // units. 11572 if (Context.shouldExternalize(D)) 11573 return GVA_StrongExternal; 11574 } 11575 return L; 11576 } 11577 11578 /// Adjust the GVALinkage for a declaration based on what an external AST source 11579 /// knows about whether there can be other definitions of this declaration. 
11580 static GVALinkage 11581 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11582 GVALinkage L) { 11583 ExternalASTSource *Source = Ctx.getExternalSource(); 11584 if (!Source) 11585 return L; 11586 11587 switch (Source->hasExternalDefinitions(D)) { 11588 case ExternalASTSource::EK_Never: 11589 // Other translation units rely on us to provide the definition. 11590 if (L == GVA_DiscardableODR) 11591 return GVA_StrongODR; 11592 break; 11593 11594 case ExternalASTSource::EK_Always: 11595 return GVA_AvailableExternally; 11596 11597 case ExternalASTSource::EK_ReplyHazy: 11598 break; 11599 } 11600 return L; 11601 } 11602 11603 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11604 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11605 adjustGVALinkageForAttributes(*this, FD, 11606 basicGVALinkageForFunction(*this, FD))); 11607 } 11608 11609 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11610 const VarDecl *VD) { 11611 if (!VD->isExternallyVisible()) 11612 return GVA_Internal; 11613 11614 if (VD->isStaticLocal()) { 11615 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11616 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11617 LexicalContext = LexicalContext->getLexicalParent(); 11618 11619 // ObjC Blocks can create local variables that don't have a FunctionDecl 11620 // LexicalContext. 11621 if (!LexicalContext) 11622 return GVA_DiscardableODR; 11623 11624 // Otherwise, let the static local variable inherit its linkage from the 11625 // nearest enclosing function. 11626 auto StaticLocalLinkage = 11627 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11628 11629 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11630 // be emitted in any object with references to the symbol for the object it 11631 // contains, whether inline or out-of-line." 11632 // Similar behavior is observed with MSVC. An alternative ABI could use 11633 // StrongODR/AvailableExternally to match the function, but none are 11634 // known/supported currently. 11635 if (StaticLocalLinkage == GVA_StrongODR || 11636 StaticLocalLinkage == GVA_AvailableExternally) 11637 return GVA_DiscardableODR; 11638 return StaticLocalLinkage; 11639 } 11640 11641 // MSVC treats in-class initialized static data members as definitions. 11642 // By giving them non-strong linkage, out-of-line definitions won't 11643 // cause link errors. 11644 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11645 return GVA_DiscardableODR; 11646 11647 // Most non-template variables have strong linkage; inline variables are 11648 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11649 GVALinkage StrongLinkage; 11650 switch (Context.getInlineVariableDefinitionKind(VD)) { 11651 case ASTContext::InlineVariableDefinitionKind::None: 11652 StrongLinkage = GVA_StrongExternal; 11653 break; 11654 case ASTContext::InlineVariableDefinitionKind::Weak: 11655 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11656 StrongLinkage = GVA_DiscardableODR; 11657 break; 11658 case ASTContext::InlineVariableDefinitionKind::Strong: 11659 StrongLinkage = GVA_StrongODR; 11660 break; 11661 } 11662 11663 switch (VD->getTemplateSpecializationKind()) { 11664 case TSK_Undeclared: 11665 return StrongLinkage; 11666 11667 case TSK_ExplicitSpecialization: 11668 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11669 VD->isStaticDataMember() 11670 ? 
GVA_StrongODR 11671 : StrongLinkage; 11672 11673 case TSK_ExplicitInstantiationDefinition: 11674 return GVA_StrongODR; 11675 11676 case TSK_ExplicitInstantiationDeclaration: 11677 return GVA_AvailableExternally; 11678 11679 case TSK_ImplicitInstantiation: 11680 return GVA_DiscardableODR; 11681 } 11682 11683 llvm_unreachable("Invalid Linkage!"); 11684 } 11685 11686 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { 11687 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11688 adjustGVALinkageForAttributes(*this, VD, 11689 basicGVALinkageForVariable(*this, VD))); 11690 } 11691 11692 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11693 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11694 if (!VD->isFileVarDecl()) 11695 return false; 11696 // Global named register variables (GNU extension) are never emitted. 11697 if (VD->getStorageClass() == SC_Register) 11698 return false; 11699 if (VD->getDescribedVarTemplate() || 11700 isa<VarTemplatePartialSpecializationDecl>(VD)) 11701 return false; 11702 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11703 // We never need to emit an uninstantiated function template. 11704 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11705 return false; 11706 } else if (isa<PragmaCommentDecl>(D)) 11707 return true; 11708 else if (isa<PragmaDetectMismatchDecl>(D)) 11709 return true; 11710 else if (isa<OMPRequiresDecl>(D)) 11711 return true; 11712 else if (isa<OMPThreadPrivateDecl>(D)) 11713 return !D->getDeclContext()->isDependentContext(); 11714 else if (isa<OMPAllocateDecl>(D)) 11715 return !D->getDeclContext()->isDependentContext(); 11716 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11717 return !D->getDeclContext()->isDependentContext(); 11718 else if (isa<ImportDecl>(D)) 11719 return true; 11720 else 11721 return false; 11722 11723 // If this is a member of a class template, we do not need to emit it. 11724 if (D->getDeclContext()->isDependentContext()) 11725 return false; 11726 11727 // Weak references don't produce any output by themselves. 11728 if (D->hasAttr<WeakRefAttr>()) 11729 return false; 11730 11731 // Aliases and used decls are required. 11732 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11733 return true; 11734 11735 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11736 // Forward declarations aren't required. 11737 if (!FD->doesThisDeclarationHaveABody()) 11738 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11739 11740 // Constructors and destructors are required. 11741 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11742 return true; 11743 11744 // The key function for a class is required. This rule only comes 11745 // into play when inline functions can be key functions, though. 11746 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11747 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11748 const CXXRecordDecl *RD = MD->getParent(); 11749 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11750 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11751 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11752 return true; 11753 } 11754 } 11755 } 11756 11757 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11758 11759 // static, static inline, always_inline, and extern inline functions can 11760 // always be deferred. Normal inline functions can be deferred in C99/C++. 11761 // Implicit template instantiations can also be deferred in C++. 
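    // Illustrative consequence (sketch derived from the linkage computed
    // above): a plain C++ 'inline' function ends up with GVA_DiscardableODR,
    // which is discardable, so this returns false and emission is deferred
    // until a use is seen.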
11762 return !isDiscardableGVALinkage(Linkage); 11763 } 11764 11765 const auto *VD = cast<VarDecl>(D); 11766 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11767 11768 // If the decl is marked as `declare target to`, it should be emitted for the 11769 // host and for the device. 11770 if (LangOpts.OpenMP && 11771 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11772 return true; 11773 11774 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11775 !isMSStaticDataMemberInlineDefinition(VD)) 11776 return false; 11777 11778 // Variables that can be needed in other TUs are required. 11779 auto Linkage = GetGVALinkageForVariable(VD); 11780 if (!isDiscardableGVALinkage(Linkage)) 11781 return true; 11782 11783 // We never need to emit a variable that is available in another TU. 11784 if (Linkage == GVA_AvailableExternally) 11785 return false; 11786 11787 // Variables that have destruction with side-effects are required. 11788 if (VD->needsDestruction(*this)) 11789 return true; 11790 11791 // Variables that have initialization with side-effects are required. 11792 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11793 // We can get a value-dependent initializer during error recovery. 11794 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11795 return true; 11796 11797 // Likewise, variables with tuple-like bindings are required if their 11798 // bindings have side-effects. 11799 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11800 for (const auto *BD : DD->bindings()) 11801 if (const auto *BindingVD = BD->getHoldingVar()) 11802 if (DeclMustBeEmitted(BindingVD)) 11803 return true; 11804 11805 return false; 11806 } 11807 11808 void ASTContext::forEachMultiversionedFunctionVersion( 11809 const FunctionDecl *FD, 11810 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11811 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11812 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11813 FD = FD->getMostRecentDecl(); 11814 // FIXME: The order of traversal here matters and depends on the order of 11815 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11816 // shouldn't rely on that. 11817 for (auto *CurDecl : 11818 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11819 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11820 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11821 !llvm::is_contained(SeenDecls, CurFD)) { 11822 SeenDecls.insert(CurFD); 11823 Pred(CurFD); 11824 } 11825 } 11826 } 11827 11828 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11829 bool IsCXXMethod, 11830 bool IsBuiltin) const { 11831 // Pass through to the C++ ABI object 11832 if (IsCXXMethod) 11833 return ABI->getDefaultMethodCallConv(IsVariadic); 11834 11835 // Builtins ignore user-specified default calling convention and remain the 11836 // Target's default calling convention. 11837 if (!IsBuiltin) { 11838 switch (LangOpts.getDefaultCallingConv()) { 11839 case LangOptions::DCC_None: 11840 break; 11841 case LangOptions::DCC_CDecl: 11842 return CC_C; 11843 case LangOptions::DCC_FastCall: 11844 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11845 return CC_X86FastCall; 11846 break; 11847 case LangOptions::DCC_StdCall: 11848 if (!IsVariadic) 11849 return CC_X86StdCall; 11850 break; 11851 case LangOptions::DCC_VectorCall: 11852 // __vectorcall cannot be applied to variadic functions. 
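    // Illustrative example (hypothetical signature): with the default set to
    // DCC_VectorCall, 'void f(int, ...)' is variadic, so this case breaks
    // out of the switch and the target's own default convention is returned
    // below instead.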
11853 if (!IsVariadic) 11854 return CC_X86VectorCall; 11855 break; 11856 case LangOptions::DCC_RegCall: 11857 // __regcall cannot be applied to variadic functions. 11858 if (!IsVariadic) 11859 return CC_X86RegCall; 11860 break; 11861 } 11862 } 11863 return Target->getDefaultCallingConv(); 11864 } 11865 11866 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 11867 // Pass through to the C++ ABI object 11868 return ABI->isNearlyEmpty(RD); 11869 } 11870 11871 VTableContextBase *ASTContext::getVTableContext() { 11872 if (!VTContext.get()) { 11873 auto ABI = Target->getCXXABI(); 11874 if (ABI.isMicrosoft()) 11875 VTContext.reset(new MicrosoftVTableContext(*this)); 11876 else { 11877 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 11878 ? ItaniumVTableContext::Relative 11879 : ItaniumVTableContext::Pointer; 11880 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 11881 } 11882 } 11883 return VTContext.get(); 11884 } 11885 11886 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 11887 if (!T) 11888 T = Target; 11889 switch (T->getCXXABI().getKind()) { 11890 case TargetCXXABI::AppleARM64: 11891 case TargetCXXABI::Fuchsia: 11892 case TargetCXXABI::GenericAArch64: 11893 case TargetCXXABI::GenericItanium: 11894 case TargetCXXABI::GenericARM: 11895 case TargetCXXABI::GenericMIPS: 11896 case TargetCXXABI::iOS: 11897 case TargetCXXABI::WebAssembly: 11898 case TargetCXXABI::WatchOS: 11899 case TargetCXXABI::XL: 11900 return ItaniumMangleContext::create(*this, getDiagnostics()); 11901 case TargetCXXABI::Microsoft: 11902 return MicrosoftMangleContext::create(*this, getDiagnostics()); 11903 } 11904 llvm_unreachable("Unsupported ABI"); 11905 } 11906 11907 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 11908 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 11909 "Device mangle context does not support Microsoft mangling."); 11910 switch (T.getCXXABI().getKind()) { 11911 case TargetCXXABI::AppleARM64: 11912 case TargetCXXABI::Fuchsia: 11913 case TargetCXXABI::GenericAArch64: 11914 case TargetCXXABI::GenericItanium: 11915 case TargetCXXABI::GenericARM: 11916 case TargetCXXABI::GenericMIPS: 11917 case TargetCXXABI::iOS: 11918 case TargetCXXABI::WebAssembly: 11919 case TargetCXXABI::WatchOS: 11920 case TargetCXXABI::XL: 11921 return ItaniumMangleContext::create( 11922 *this, getDiagnostics(), 11923 [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> { 11924 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 11925 return RD->getDeviceLambdaManglingNumber(); 11926 return std::nullopt; 11927 }, 11928 /*IsAux=*/true); 11929 case TargetCXXABI::Microsoft: 11930 return MicrosoftMangleContext::create(*this, getDiagnostics(), 11931 /*IsAux=*/true); 11932 } 11933 llvm_unreachable("Unsupported ABI"); 11934 } 11935 11936 CXXABI::~CXXABI() = default; 11937 11938 size_t ASTContext::getSideTableAllocatedMemory() const { 11939 return ASTRecordLayouts.getMemorySize() + 11940 llvm::capacity_in_bytes(ObjCLayouts) + 11941 llvm::capacity_in_bytes(KeyFunctions) + 11942 llvm::capacity_in_bytes(ObjCImpls) + 11943 llvm::capacity_in_bytes(BlockVarCopyInits) + 11944 llvm::capacity_in_bytes(DeclAttrs) + 11945 llvm::capacity_in_bytes(TemplateOrInstantiation) + 11946 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 11947 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 11948 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 11949 llvm::capacity_in_bytes(OverriddenMethods) + 11950 
llvm::capacity_in_bytes(Types) + 11951 llvm::capacity_in_bytes(VariableArrayTypes); 11952 } 11953 11954 /// getIntTypeForBitwidth - 11955 /// sets integer QualTy according to specified details: 11956 /// bitwidth, signed/unsigned. 11957 /// Returns empty type if there is no appropriate target types. 11958 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, 11959 unsigned Signed) const { 11960 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); 11961 CanQualType QualTy = getFromTargetType(Ty); 11962 if (!QualTy && DestWidth == 128) 11963 return Signed ? Int128Ty : UnsignedInt128Ty; 11964 return QualTy; 11965 } 11966 11967 /// getRealTypeForBitwidth - 11968 /// sets floating point QualTy according to specified bitwidth. 11969 /// Returns empty type if there is no appropriate target types. 11970 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, 11971 FloatModeKind ExplicitType) const { 11972 FloatModeKind Ty = 11973 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType); 11974 switch (Ty) { 11975 case FloatModeKind::Half: 11976 return HalfTy; 11977 case FloatModeKind::Float: 11978 return FloatTy; 11979 case FloatModeKind::Double: 11980 return DoubleTy; 11981 case FloatModeKind::LongDouble: 11982 return LongDoubleTy; 11983 case FloatModeKind::Float128: 11984 return Float128Ty; 11985 case FloatModeKind::Ibm128: 11986 return Ibm128Ty; 11987 case FloatModeKind::NoFloat: 11988 return {}; 11989 } 11990 11991 llvm_unreachable("Unhandled TargetInfo::RealType value"); 11992 } 11993 11994 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { 11995 if (Number > 1) 11996 MangleNumbers[ND] = Number; 11997 } 11998 11999 unsigned ASTContext::getManglingNumber(const NamedDecl *ND, 12000 bool ForAuxTarget) const { 12001 auto I = MangleNumbers.find(ND); 12002 unsigned Res = I != MangleNumbers.end() ? I->second : 1; 12003 // CUDA/HIP host compilation encodes host and device mangling numbers 12004 // as lower and upper half of 32 bit integer. 12005 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) { 12006 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF; 12007 } else { 12008 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling " 12009 "number for aux target"); 12010 } 12011 return Res > 1 ? Res : 1; 12012 } 12013 12014 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { 12015 if (Number > 1) 12016 StaticLocalNumbers[VD] = Number; 12017 } 12018 12019 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { 12020 auto I = StaticLocalNumbers.find(VD); 12021 return I != StaticLocalNumbers.end() ? I->second : 1; 12022 } 12023 12024 MangleNumberingContext & 12025 ASTContext::getManglingNumberContext(const DeclContext *DC) { 12026 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 12027 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC]; 12028 if (!MCtx) 12029 MCtx = createMangleNumberingContext(); 12030 return *MCtx; 12031 } 12032 12033 MangleNumberingContext & 12034 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) { 12035 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 
12036 std::unique_ptr<MangleNumberingContext> &MCtx = 12037 ExtraMangleNumberingContexts[D]; 12038 if (!MCtx) 12039 MCtx = createMangleNumberingContext(); 12040 return *MCtx; 12041 } 12042 12043 std::unique_ptr<MangleNumberingContext> 12044 ASTContext::createMangleNumberingContext() const { 12045 return ABI->createMangleNumberingContext(); 12046 } 12047 12048 const CXXConstructorDecl * 12049 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 12050 return ABI->getCopyConstructorForExceptionObject( 12051 cast<CXXRecordDecl>(RD->getFirstDecl())); 12052 } 12053 12054 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 12055 CXXConstructorDecl *CD) { 12056 return ABI->addCopyConstructorForExceptionObject( 12057 cast<CXXRecordDecl>(RD->getFirstDecl()), 12058 cast<CXXConstructorDecl>(CD->getFirstDecl())); 12059 } 12060 12061 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 12062 TypedefNameDecl *DD) { 12063 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 12064 } 12065 12066 TypedefNameDecl * 12067 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 12068 return ABI->getTypedefNameForUnnamedTagDecl(TD); 12069 } 12070 12071 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 12072 DeclaratorDecl *DD) { 12073 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 12074 } 12075 12076 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 12077 return ABI->getDeclaratorForUnnamedTagDecl(TD); 12078 } 12079 12080 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 12081 ParamIndices[D] = index; 12082 } 12083 12084 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 12085 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 12086 assert(I != ParamIndices.end() && 12087 "ParmIndices lacks entry set by ParmVarDecl"); 12088 return I->second; 12089 } 12090 12091 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 12092 unsigned Length) const { 12093 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 12094 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 12095 EltTy = EltTy.withConst(); 12096 12097 EltTy = adjustStringLiteralBaseType(EltTy); 12098 12099 // Get an array type for the string, according to C99 6.4.5. This includes 12100 // the null terminator character. 
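  // Worked example (C++ mode, EltTy == char): for Length == 3, e.g. the
  // literal "foo", the type built below is 'const char[4]', which accounts
  // for the terminating null character.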
12101 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, 12102 ArrayType::Normal, /*IndexTypeQuals*/ 0); 12103 } 12104 12105 StringLiteral * 12106 ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { 12107 StringLiteral *&Result = StringLiteralCache[Key]; 12108 if (!Result) 12109 Result = StringLiteral::Create( 12110 *this, Key, StringLiteral::Ordinary, 12111 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), 12112 SourceLocation()); 12113 return Result; 12114 } 12115 12116 MSGuidDecl * 12117 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { 12118 assert(MSGuidTagDecl && "building MS GUID without MS extensions?"); 12119 12120 llvm::FoldingSetNodeID ID; 12121 MSGuidDecl::Profile(ID, Parts); 12122 12123 void *InsertPos; 12124 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos)) 12125 return Existing; 12126 12127 QualType GUIDType = getMSGuidType().withConst(); 12128 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts); 12129 MSGuidDecls.InsertNode(New, InsertPos); 12130 return New; 12131 } 12132 12133 UnnamedGlobalConstantDecl * 12134 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty, 12135 const APValue &APVal) const { 12136 llvm::FoldingSetNodeID ID; 12137 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal); 12138 12139 void *InsertPos; 12140 if (UnnamedGlobalConstantDecl *Existing = 12141 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos)) 12142 return Existing; 12143 12144 UnnamedGlobalConstantDecl *New = 12145 UnnamedGlobalConstantDecl::Create(*this, Ty, APVal); 12146 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos); 12147 return New; 12148 } 12149 12150 TemplateParamObjectDecl * 12151 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { 12152 assert(T->isRecordType() && "template param object of unexpected type"); 12153 12154 // C++ [temp.param]p8: 12155 // [...] a static storage duration object of type 'const T' [...] 12156 T.addConst(); 12157 12158 llvm::FoldingSetNodeID ID; 12159 TemplateParamObjectDecl::Profile(ID, T, V); 12160 12161 void *InsertPos; 12162 if (TemplateParamObjectDecl *Existing = 12163 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos)) 12164 return Existing; 12165 12166 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V); 12167 TemplateParamObjectDecls.InsertNode(New, InsertPos); 12168 return New; 12169 } 12170 12171 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { 12172 const llvm::Triple &T = getTargetInfo().getTriple(); 12173 if (!T.isOSDarwin()) 12174 return false; 12175 12176 if (!(T.isiOS() && T.isOSVersionLT(7)) && 12177 !(T.isMacOSX() && T.isOSVersionLT(10, 9))) 12178 return false; 12179 12180 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 12181 CharUnits sizeChars = getTypeSizeInChars(AtomicTy); 12182 uint64_t Size = sizeChars.getQuantity(); 12183 CharUnits alignChars = getTypeAlignInChars(AtomicTy); 12184 unsigned Align = alignChars.getQuantity(); 12185 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); 12186 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); 12187 } 12188 12189 bool 12190 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, 12191 const ObjCMethodDecl *MethodImpl) { 12192 // No point trying to match an unavailable/deprecated method.
12193 if (MethodDecl->hasAttr<UnavailableAttr>() 12194 || MethodDecl->hasAttr<DeprecatedAttr>()) 12195 return false; 12196 if (MethodDecl->getObjCDeclQualifier() != 12197 MethodImpl->getObjCDeclQualifier()) 12198 return false; 12199 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12200 return false; 12201 12202 if (MethodDecl->param_size() != MethodImpl->param_size()) 12203 return false; 12204 12205 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12206 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12207 EF = MethodDecl->param_end(); 12208 IM != EM && IF != EF; ++IM, ++IF) { 12209 const ParmVarDecl *DeclVar = (*IF); 12210 const ParmVarDecl *ImplVar = (*IM); 12211 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12212 return false; 12213 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12214 return false; 12215 } 12216 12217 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12218 } 12219 12220 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12221 LangAS AS; 12222 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12223 AS = LangAS::Default; 12224 else 12225 AS = QT->getPointeeType().getAddressSpace(); 12226 12227 return getTargetInfo().getNullPointerValue(AS); 12228 } 12229 12230 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12231 return getTargetInfo().getTargetAddressSpace(AS); 12232 } 12233 12234 bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { 12235 if (X == Y) 12236 return true; 12237 if (!X || !Y) 12238 return false; 12239 llvm::FoldingSetNodeID IDX, IDY; 12240 X->Profile(IDX, *this, /*Canonical=*/true); 12241 Y->Profile(IDY, *this, /*Canonical=*/true); 12242 return IDX == IDY; 12243 } 12244 12245 // The getCommon* helpers return, for given 'same' X and Y entities given as 12246 // inputs, another entity which is also the 'same' as the inputs, but which 12247 // is closer to the canonical form of the inputs, each according to a given 12248 // criteria. 12249 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of 12250 // the regular ones. 12251 12252 static Decl *getCommonDecl(Decl *X, Decl *Y) { 12253 if (!declaresSameEntity(X, Y)) 12254 return nullptr; 12255 for (const Decl *DX : X->redecls()) { 12256 // If we reach Y before reaching the first decl, that means X is older. 12257 if (DX == Y) 12258 return X; 12259 // If we reach the first decl, then Y is older. 12260 if (DX->isFirstDecl()) 12261 return Y; 12262 } 12263 llvm_unreachable("Corrupt redecls chain"); 12264 } 12265 12266 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12267 static T *getCommonDecl(T *X, T *Y) { 12268 return cast_or_null<T>( 12269 getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), 12270 const_cast<Decl *>(cast_or_null<Decl>(Y)))); 12271 } 12272 12273 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12274 static T *getCommonDeclChecked(T *X, T *Y) { 12275 return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), 12276 const_cast<Decl *>(cast<Decl>(Y)))); 12277 } 12278 12279 static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, 12280 TemplateName Y) { 12281 if (X.getAsVoidPointer() == Y.getAsVoidPointer()) 12282 return X; 12283 // FIXME: There are cases here where we could find a common template name 12284 // with more sugar. For example one could be a SubstTemplateTemplate* 12285 // replacing the other. 
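  // Illustrative example (hypothetical spelling difference): 'std::vector'
  // written with its qualifier and plain 'vector' after a using-directive
  // are different sugared TemplateNames for the same underlying template;
  // when the sugar differs like that, the canonical name computed below is
  // what gets returned, since only pointer-identical sugar is preserved.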
12286 TemplateName CX = Ctx.getCanonicalTemplateName(X); 12287 if (CX.getAsVoidPointer() != 12288 Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) 12289 return TemplateName(); 12290 return CX; 12291 } 12292 12293 static TemplateName 12294 getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { 12295 TemplateName R = getCommonTemplateName(Ctx, X, Y); 12296 assert(R.getAsVoidPointer() != nullptr); 12297 return R; 12298 } 12299 12300 static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, 12301 ArrayRef<QualType> Ys, bool Unqualified = false) { 12302 assert(Xs.size() == Ys.size()); 12303 SmallVector<QualType, 8> Rs(Xs.size()); 12304 for (size_t I = 0; I < Rs.size(); ++I) 12305 Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); 12306 return Rs; 12307 } 12308 12309 template <class T> 12310 static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { 12311 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() 12312 : SourceLocation(); 12313 } 12314 12315 static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, 12316 const TemplateArgument &X, 12317 const TemplateArgument &Y) { 12318 if (X.getKind() != Y.getKind()) 12319 return TemplateArgument(); 12320 12321 switch (X.getKind()) { 12322 case TemplateArgument::ArgKind::Type: 12323 if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) 12324 return TemplateArgument(); 12325 return TemplateArgument( 12326 Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); 12327 case TemplateArgument::ArgKind::NullPtr: 12328 if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) 12329 return TemplateArgument(); 12330 return TemplateArgument( 12331 Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), 12332 /*Unqualified=*/true); 12333 case TemplateArgument::ArgKind::Expression: 12334 if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) 12335 return TemplateArgument(); 12336 // FIXME: Try to keep the common sugar. 12337 return X; 12338 case TemplateArgument::ArgKind::Template: { 12339 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); 12340 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12341 if (!CTN.getAsVoidPointer()) 12342 return TemplateArgument(); 12343 return TemplateArgument(CTN); 12344 } 12345 case TemplateArgument::ArgKind::TemplateExpansion: { 12346 TemplateName TX = X.getAsTemplateOrTemplatePattern(), 12347 TY = Y.getAsTemplateOrTemplatePattern(); 12348 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12349 if (!CTN.getAsVoidPointer()) 12350 return TemplateName(); 12351 auto NExpX = X.getNumTemplateExpansions(); 12352 assert(NExpX == Y.getNumTemplateExpansions()); 12353 return TemplateArgument(CTN, NExpX); 12354 } 12355 default: 12356 // FIXME: Handle the other argument kinds. 
12357 return X; 12358 } 12359 } 12360 12361 static bool getCommonTemplateArguments(ASTContext &Ctx, 12362 SmallVectorImpl<TemplateArgument> &R, 12363 ArrayRef<TemplateArgument> Xs, 12364 ArrayRef<TemplateArgument> Ys) { 12365 if (Xs.size() != Ys.size()) 12366 return true; 12367 R.resize(Xs.size()); 12368 for (size_t I = 0; I < R.size(); ++I) { 12369 R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); 12370 if (R[I].isNull()) 12371 return true; 12372 } 12373 return false; 12374 } 12375 12376 static auto getCommonTemplateArguments(ASTContext &Ctx, 12377 ArrayRef<TemplateArgument> Xs, 12378 ArrayRef<TemplateArgument> Ys) { 12379 SmallVector<TemplateArgument, 8> R; 12380 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); 12381 assert(!Different); 12382 (void)Different; 12383 return R; 12384 } 12385 12386 template <class T> 12387 static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { 12388 return X->getKeyword() == Y->getKeyword() ? X->getKeyword() 12389 : ElaboratedTypeKeyword::ETK_None; 12390 } 12391 12392 template <class T> 12393 static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, 12394 const T *Y) { 12395 // FIXME: Try to keep the common NNS sugar. 12396 return X->getQualifier() == Y->getQualifier() 12397 ? X->getQualifier() 12398 : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); 12399 } 12400 12401 template <class T> 12402 static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { 12403 return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); 12404 } 12405 12406 template <class T> 12407 static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, 12408 Qualifiers &QX, const T *Y, 12409 Qualifiers &QY) { 12410 QualType EX = X->getElementType(), EY = Y->getElementType(); 12411 QualType R = Ctx.getCommonSugaredType(EX, EY, 12412 /*Unqualified=*/true); 12413 Qualifiers RQ = R.getQualifiers(); 12414 QX += EX.getQualifiers() - RQ; 12415 QY += EY.getQualifiers() - RQ; 12416 return R; 12417 } 12418 12419 template <class T> 12420 static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { 12421 return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); 12422 } 12423 12424 template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { 12425 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); 12426 return X->getSizeExpr(); 12427 } 12428 12429 static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { 12430 assert(X->getSizeModifier() == Y->getSizeModifier()); 12431 return X->getSizeModifier(); 12432 } 12433 12434 static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, 12435 const ArrayType *Y) { 12436 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); 12437 return X->getIndexTypeCVRQualifiers(); 12438 } 12439 12440 // Merges two type lists such that the resulting vector will contain 12441 // each type (in a canonical sense) only once, in the order they appear 12442 // from X to Y. If they occur in both X and Y, the result will contain 12443 // the common sugared type between them. 
12444 static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, 12445 ArrayRef<QualType> X, ArrayRef<QualType> Y) { 12446 llvm::DenseMap<QualType, unsigned> Found; 12447 for (auto Ts : {X, Y}) { 12448 for (QualType T : Ts) { 12449 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); 12450 if (!Res.second) { 12451 QualType &U = Out[Res.first->second]; 12452 U = Ctx.getCommonSugaredType(U, T); 12453 } else { 12454 Out.emplace_back(T); 12455 } 12456 } 12457 } 12458 } 12459 12460 FunctionProtoType::ExceptionSpecInfo 12461 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, 12462 FunctionProtoType::ExceptionSpecInfo ESI2, 12463 SmallVectorImpl<QualType> &ExceptionTypeStorage, 12464 bool AcceptDependent) { 12465 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; 12466 12467 // If either of them can throw anything, that is the result. 12468 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { 12469 if (EST1 == I) 12470 return ESI1; 12471 if (EST2 == I) 12472 return ESI2; 12473 } 12474 12475 // If either of them is non-throwing, the result is the other. 12476 for (auto I : 12477 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { 12478 if (EST1 == I) 12479 return ESI2; 12480 if (EST2 == I) 12481 return ESI1; 12482 } 12483 12484 // If we're left with value-dependent computed noexcept expressions, we're 12485 // stuck. Before C++17, we can just drop the exception specification entirely, 12486 // since it's not actually part of the canonical type. And this should never 12487 // happen in C++17, because it would mean we were computing the composite 12488 // pointer type of dependent types, which should never happen. 12489 if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) { 12490 assert(AcceptDependent && 12491 "computing composite pointer type of dependent types"); 12492 return FunctionProtoType::ExceptionSpecInfo(); 12493 } 12494 12495 // Switch over the possibilities so that people adding new values know to 12496 // update this function. 12497 switch (EST1) { 12498 case EST_None: 12499 case EST_DynamicNone: 12500 case EST_MSAny: 12501 case EST_BasicNoexcept: 12502 case EST_DependentNoexcept: 12503 case EST_NoexceptFalse: 12504 case EST_NoexceptTrue: 12505 case EST_NoThrow: 12506 llvm_unreachable("These ESTs should be handled above"); 12507 12508 case EST_Dynamic: { 12509 // This is the fun case: both exception specifications are dynamic. Form 12510 // the union of the two lists. 
12511 assert(EST2 == EST_Dynamic && "other cases should already be handled"); 12512 mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions, 12513 ESI2.Exceptions); 12514 FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic); 12515 Result.Exceptions = ExceptionTypeStorage; 12516 return Result; 12517 } 12518 12519 case EST_Unevaluated: 12520 case EST_Uninstantiated: 12521 case EST_Unparsed: 12522 llvm_unreachable("shouldn't see unresolved exception specifications here"); 12523 } 12524 12525 llvm_unreachable("invalid ExceptionSpecificationType"); 12526 } 12527 12528 static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, 12529 Qualifiers &QX, const Type *Y, 12530 Qualifiers &QY) { 12531 Type::TypeClass TC = X->getTypeClass(); 12532 assert(TC == Y->getTypeClass()); 12533 switch (TC) { 12534 #define UNEXPECTED_TYPE(Class, Kind) \ 12535 case Type::Class: \ 12536 llvm_unreachable("Unexpected " Kind ": " #Class); 12537 12538 #define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical") 12539 #define TYPE(Class, Base) 12540 #include "clang/AST/TypeNodes.inc" 12541 12542 #define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free") 12543 SUGAR_FREE_TYPE(Builtin) 12544 SUGAR_FREE_TYPE(Decltype) 12545 SUGAR_FREE_TYPE(DeducedTemplateSpecialization) 12546 SUGAR_FREE_TYPE(DependentBitInt) 12547 SUGAR_FREE_TYPE(Enum) 12548 SUGAR_FREE_TYPE(BitInt) 12549 SUGAR_FREE_TYPE(ObjCInterface) 12550 SUGAR_FREE_TYPE(Record) 12551 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack) 12552 SUGAR_FREE_TYPE(UnresolvedUsing) 12553 #undef SUGAR_FREE_TYPE 12554 #define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique") 12555 NON_UNIQUE_TYPE(TypeOfExpr) 12556 NON_UNIQUE_TYPE(VariableArray) 12557 #undef NON_UNIQUE_TYPE 12558 12559 UNEXPECTED_TYPE(TypeOf, "sugar") 12560 12561 #undef UNEXPECTED_TYPE 12562 12563 case Type::Auto: { 12564 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 12565 assert(AX->getDeducedType().isNull()); 12566 assert(AY->getDeducedType().isNull()); 12567 assert(AX->getKeyword() == AY->getKeyword()); 12568 assert(AX->isInstantiationDependentType() == 12569 AY->isInstantiationDependentType()); 12570 auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(), 12571 AY->getTypeConstraintArguments()); 12572 return Ctx.getAutoType(QualType(), AX->getKeyword(), 12573 AX->isInstantiationDependentType(), 12574 AX->containsUnexpandedParameterPack(), 12575 getCommonDeclChecked(AX->getTypeConstraintConcept(), 12576 AY->getTypeConstraintConcept()), 12577 As); 12578 } 12579 case Type::IncompleteArray: { 12580 const auto *AX = cast<IncompleteArrayType>(X), 12581 *AY = cast<IncompleteArrayType>(Y); 12582 return Ctx.getIncompleteArrayType( 12583 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12584 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12585 } 12586 case Type::DependentSizedArray: { 12587 const auto *AX = cast<DependentSizedArrayType>(X), 12588 *AY = cast<DependentSizedArrayType>(Y); 12589 return Ctx.getDependentSizedArrayType( 12590 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12591 getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY), 12592 getCommonIndexTypeCVRQualifiers(AX, AY), 12593 AX->getBracketsRange() == AY->getBracketsRange() 12594 ? 
AX->getBracketsRange() 12595 : SourceRange()); 12596 } 12597 case Type::ConstantArray: { 12598 const auto *AX = cast<ConstantArrayType>(X), 12599 *AY = cast<ConstantArrayType>(Y); 12600 assert(AX->getSize() == AY->getSize()); 12601 const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) 12602 ? AX->getSizeExpr() 12603 : nullptr; 12604 return Ctx.getConstantArrayType( 12605 getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, 12606 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12607 } 12608 case Type::Atomic: { 12609 const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); 12610 return Ctx.getAtomicType( 12611 Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType())); 12612 } 12613 case Type::Complex: { 12614 const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y); 12615 return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY)); 12616 } 12617 case Type::Pointer: { 12618 const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y); 12619 return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY)); 12620 } 12621 case Type::BlockPointer: { 12622 const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y); 12623 return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY)); 12624 } 12625 case Type::ObjCObjectPointer: { 12626 const auto *PX = cast<ObjCObjectPointerType>(X), 12627 *PY = cast<ObjCObjectPointerType>(Y); 12628 return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY)); 12629 } 12630 case Type::MemberPointer: { 12631 const auto *PX = cast<MemberPointerType>(X), 12632 *PY = cast<MemberPointerType>(Y); 12633 return Ctx.getMemberPointerType( 12634 getCommonPointeeType(Ctx, PX, PY), 12635 Ctx.getCommonSugaredType(QualType(PX->getClass(), 0), 12636 QualType(PY->getClass(), 0)) 12637 .getTypePtr()); 12638 } 12639 case Type::LValueReference: { 12640 const auto *PX = cast<LValueReferenceType>(X), 12641 *PY = cast<LValueReferenceType>(Y); 12642 // FIXME: Preserve PointeeTypeAsWritten. 12643 return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY), 12644 PX->isSpelledAsLValue() || 12645 PY->isSpelledAsLValue()); 12646 } 12647 case Type::RValueReference: { 12648 const auto *PX = cast<RValueReferenceType>(X), 12649 *PY = cast<RValueReferenceType>(Y); 12650 // FIXME: Preserve PointeeTypeAsWritten. 
    return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(X),
               *PY = cast<DependentAddressSpaceType>(Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY),
                                            PX->getAddrSpaceExpr(),
                                            getCommonAttrLoc(PX, PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(X),
               *FY = cast<FunctionNoProtoType>(Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()),
        FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(X),
               *FY = cast<FunctionProtoType>(Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType());
    auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions,
        /*AcceptDependent=*/true);
    return Ctx.getFunctionType(R, P, EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
                              OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs,
        OX->getProtocols(),
        OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(X),
               *MY = cast<ConstantMatrixType>(Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY),
                                     MX->getNumRows(), MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(X),
               *MY = cast<DependentSizedMatrixType>(Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        getCommonElementType(Ctx, MX, MY), MX->getRowExpr(),
        MX->getColumnExpr(), getCommonAttrLoc(MX, MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(X), *VY =
cast<VectorType>(Y); 12730 assert(VX->getNumElements() == VY->getNumElements()); 12731 assert(VX->getVectorKind() == VY->getVectorKind()); 12732 return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY), 12733 VX->getNumElements(), VX->getVectorKind()); 12734 } 12735 case Type::ExtVector: { 12736 const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y); 12737 assert(VX->getNumElements() == VY->getNumElements()); 12738 return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY), 12739 VX->getNumElements()); 12740 } 12741 case Type::DependentSizedExtVector: { 12742 const auto *VX = cast<DependentSizedExtVectorType>(X), 12743 *VY = cast<DependentSizedExtVectorType>(Y); 12744 return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY), 12745 getCommonSizeExpr(Ctx, VX, VY), 12746 getCommonAttrLoc(VX, VY)); 12747 } 12748 case Type::DependentVector: { 12749 const auto *VX = cast<DependentVectorType>(X), 12750 *VY = cast<DependentVectorType>(Y); 12751 assert(VX->getVectorKind() == VY->getVectorKind()); 12752 return Ctx.getDependentVectorType( 12753 getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY), 12754 getCommonAttrLoc(VX, VY), VX->getVectorKind()); 12755 } 12756 case Type::InjectedClassName: { 12757 const auto *IX = cast<InjectedClassNameType>(X), 12758 *IY = cast<InjectedClassNameType>(Y); 12759 return Ctx.getInjectedClassNameType( 12760 getCommonDeclChecked(IX->getDecl(), IY->getDecl()), 12761 Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(), 12762 IY->getInjectedSpecializationType())); 12763 } 12764 case Type::TemplateSpecialization: { 12765 const auto *TX = cast<TemplateSpecializationType>(X), 12766 *TY = cast<TemplateSpecializationType>(Y); 12767 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12768 TY->template_arguments()); 12769 return Ctx.getTemplateSpecializationType( 12770 ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(), 12771 TY->getTemplateName()), 12772 As, X->getCanonicalTypeInternal()); 12773 } 12774 case Type::DependentName: { 12775 const auto *NX = cast<DependentNameType>(X), 12776 *NY = cast<DependentNameType>(Y); 12777 assert(NX->getIdentifier() == NY->getIdentifier()); 12778 return Ctx.getDependentNameType( 12779 getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY), 12780 NX->getIdentifier(), NX->getCanonicalTypeInternal()); 12781 } 12782 case Type::DependentTemplateSpecialization: { 12783 const auto *TX = cast<DependentTemplateSpecializationType>(X), 12784 *TY = cast<DependentTemplateSpecializationType>(Y); 12785 assert(TX->getIdentifier() == TY->getIdentifier()); 12786 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12787 TY->template_arguments()); 12788 return Ctx.getDependentTemplateSpecializationType( 12789 getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY), 12790 TX->getIdentifier(), As); 12791 } 12792 case Type::UnaryTransform: { 12793 const auto *TX = cast<UnaryTransformType>(X), 12794 *TY = cast<UnaryTransformType>(Y); 12795 assert(TX->getUTTKind() == TY->getUTTKind()); 12796 return Ctx.getUnaryTransformType( 12797 Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()), 12798 Ctx.getCommonSugaredType(TX->getUnderlyingType(), 12799 TY->getUnderlyingType()), 12800 TX->getUTTKind()); 12801 } 12802 case Type::PackExpansion: { 12803 const auto *PX = cast<PackExpansionType>(X), 12804 *PY = cast<PackExpansionType>(Y); 12805 assert(PX->getNumExpansions() == PY->getNumExpansions()); 12806 return Ctx.getPackExpansionType( 12807 
Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()), 12808 PX->getNumExpansions(), false); 12809 } 12810 case Type::Pipe: { 12811 const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y); 12812 assert(PX->isReadOnly() == PY->isReadOnly()); 12813 auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType 12814 : &ASTContext::getWritePipeType; 12815 return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY)); 12816 } 12817 case Type::TemplateTypeParm: { 12818 const auto *TX = cast<TemplateTypeParmType>(X), 12819 *TY = cast<TemplateTypeParmType>(Y); 12820 assert(TX->getDepth() == TY->getDepth()); 12821 assert(TX->getIndex() == TY->getIndex()); 12822 assert(TX->isParameterPack() == TY->isParameterPack()); 12823 return Ctx.getTemplateTypeParmType( 12824 TX->getDepth(), TX->getIndex(), TX->isParameterPack(), 12825 getCommonDecl(TX->getDecl(), TY->getDecl())); 12826 } 12827 } 12828 llvm_unreachable("Unknown Type Class"); 12829 } 12830 12831 static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, 12832 const Type *Y, 12833 SplitQualType Underlying) { 12834 Type::TypeClass TC = X->getTypeClass(); 12835 if (TC != Y->getTypeClass()) 12836 return QualType(); 12837 switch (TC) { 12838 #define UNEXPECTED_TYPE(Class, Kind) \ 12839 case Type::Class: \ 12840 llvm_unreachable("Unexpected " Kind ": " #Class); 12841 #define TYPE(Class, Base) 12842 #define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent") 12843 #include "clang/AST/TypeNodes.inc" 12844 12845 #define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical") 12846 CANONICAL_TYPE(Atomic) 12847 CANONICAL_TYPE(BitInt) 12848 CANONICAL_TYPE(BlockPointer) 12849 CANONICAL_TYPE(Builtin) 12850 CANONICAL_TYPE(Complex) 12851 CANONICAL_TYPE(ConstantArray) 12852 CANONICAL_TYPE(ConstantMatrix) 12853 CANONICAL_TYPE(Enum) 12854 CANONICAL_TYPE(ExtVector) 12855 CANONICAL_TYPE(FunctionNoProto) 12856 CANONICAL_TYPE(FunctionProto) 12857 CANONICAL_TYPE(IncompleteArray) 12858 CANONICAL_TYPE(LValueReference) 12859 CANONICAL_TYPE(MemberPointer) 12860 CANONICAL_TYPE(ObjCInterface) 12861 CANONICAL_TYPE(ObjCObject) 12862 CANONICAL_TYPE(ObjCObjectPointer) 12863 CANONICAL_TYPE(Pipe) 12864 CANONICAL_TYPE(Pointer) 12865 CANONICAL_TYPE(Record) 12866 CANONICAL_TYPE(RValueReference) 12867 CANONICAL_TYPE(VariableArray) 12868 CANONICAL_TYPE(Vector) 12869 #undef CANONICAL_TYPE 12870 12871 #undef UNEXPECTED_TYPE 12872 12873 case Type::Adjusted: { 12874 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); 12875 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); 12876 if (!Ctx.hasSameType(OX, OY)) 12877 return QualType(); 12878 // FIXME: It's inefficient to have to unify the original types. 12879 return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), 12880 Ctx.getQualifiedType(Underlying)); 12881 } 12882 case Type::Decayed: { 12883 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); 12884 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); 12885 if (!Ctx.hasSameType(OX, OY)) 12886 return QualType(); 12887 // FIXME: It's inefficient to have to unify the original types. 
12888 return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY), 12889 Ctx.getQualifiedType(Underlying)); 12890 } 12891 case Type::Attributed: { 12892 const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y); 12893 AttributedType::Kind Kind = AX->getAttrKind(); 12894 if (Kind != AY->getAttrKind()) 12895 return QualType(); 12896 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType(); 12897 if (!Ctx.hasSameType(MX, MY)) 12898 return QualType(); 12899 // FIXME: It's inefficient to have to unify the modified types. 12900 return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY), 12901 Ctx.getQualifiedType(Underlying)); 12902 } 12903 case Type::BTFTagAttributed: { 12904 const auto *BX = cast<BTFTagAttributedType>(X); 12905 const BTFTypeTagAttr *AX = BX->getAttr(); 12906 // The attribute is not uniqued, so just compare the tag. 12907 if (AX->getBTFTypeTag() != 12908 cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag()) 12909 return QualType(); 12910 return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying)); 12911 } 12912 case Type::Auto: { 12913 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 12914 12915 AutoTypeKeyword KW = AX->getKeyword(); 12916 if (KW != AY->getKeyword()) 12917 return QualType(); 12918 12919 ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(), 12920 AY->getTypeConstraintConcept()); 12921 SmallVector<TemplateArgument, 8> As; 12922 if (CD && 12923 getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(), 12924 AY->getTypeConstraintArguments())) 12925 CD = nullptr; // The arguments differ, so make it unconstrained. 12926 12927 // Both auto types can't be dependent, otherwise they wouldn't have been 12928 // sugar. This implies they can't contain unexpanded packs either. 12929 return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), 12930 /*IsDependent=*/false, /*IsPack=*/false, CD, As); 12931 } 12932 case Type::Decltype: 12933 return QualType(); 12934 case Type::DeducedTemplateSpecialization: 12935 // FIXME: Try to merge these. 12936 return QualType(); 12937 12938 case Type::Elaborated: { 12939 const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y); 12940 return Ctx.getElaboratedType( 12941 ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY), 12942 Ctx.getQualifiedType(Underlying), 12943 ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl())); 12944 } 12945 case Type::MacroQualified: { 12946 const auto *MX = cast<MacroQualifiedType>(X), 12947 *MY = cast<MacroQualifiedType>(Y); 12948 const IdentifierInfo *IX = MX->getMacroIdentifier(); 12949 if (IX != MY->getMacroIdentifier()) 12950 return QualType(); 12951 return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX); 12952 } 12953 case Type::SubstTemplateTypeParm: { 12954 const auto *SX = cast<SubstTemplateTypeParmType>(X), 12955 *SY = cast<SubstTemplateTypeParmType>(Y); 12956 Decl *CD = 12957 ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl()); 12958 if (!CD) 12959 return QualType(); 12960 unsigned Index = SX->getIndex(); 12961 if (Index != SY->getIndex()) 12962 return QualType(); 12963 auto PackIndex = SX->getPackIndex(); 12964 if (PackIndex != SY->getPackIndex()) 12965 return QualType(); 12966 return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying), 12967 CD, Index, PackIndex); 12968 } 12969 case Type::ObjCTypeParam: 12970 // FIXME: Try to merge these. 
    return QualType();
  case Type::Paren:
    return Ctx.getParenType(Ctx.getQualifiedType(Underlying));

  case Type::TemplateSpecialization: {
    const auto *TX = cast<TemplateSpecializationType>(X),
               *TY = cast<TemplateSpecializationType>(Y);
    TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(),
                                               TY->getTemplateName());
    if (!CTN.getAsVoidPointer())
      return QualType();
    SmallVector<TemplateArgument, 8> Args;
    if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(),
                                   TY->template_arguments()))
      return QualType();
    return Ctx.getTemplateSpecializationType(CTN, Args,
                                             Ctx.getQualifiedType(Underlying));
  }
  case Type::Typedef: {
    const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y);
    const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl());
    if (!CD)
      return QualType();
    return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying));
  }
  case Type::TypeOf: {
    // When unifying two typeof expressions where one is potentially a
    // typeof_unqual and the other is not, prefer the qualified form, as it
    // retains the most information along with the type. Only return a
    // typeof_unqual type when both operands are typeof_unqual.
    TypeOfKind Kind = TypeOfKind::Qualified;
    if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() &&
        cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified)
      Kind = TypeOfKind::Unqualified;
    return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind);
  }
  case Type::TypeOfExpr:
    return QualType();

  case Type::UnaryTransform: {
    const auto *UX = cast<UnaryTransformType>(X),
               *UY = cast<UnaryTransformType>(Y);
    UnaryTransformType::UTTKind KX = UX->getUTTKind();
    if (KX != UY->getUTTKind())
      return QualType();
    QualType BX = UX->getBaseType(), BY = UY->getBaseType();
    if (!Ctx.hasSameType(BX, BY))
      return QualType();
    // FIXME: It's inefficient to have to unify the base types.
    return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY),
                                     Ctx.getQualifiedType(Underlying), KX);
  }
  case Type::Using: {
    const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y);
    const UsingShadowDecl *CD =
        ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl());
    if (!CD)
      return QualType();
    return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying));
  }
  }
  llvm_unreachable("Unhandled Type Class");
}

static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
  SmallVector<SplitQualType, 8> R;
  while (true) {
    QTotal += T.Quals;
    QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
    if (NT == QualType(T.Ty, 0))
      break;
    R.push_back(T);
    T = NT.split();
  }
  return R;
}

QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
                                          bool Unqualified) {
  assert(Unqualified ?
hasSameUnqualifiedType(X, Y) : hasSameType(X, Y)); 13051 if (X == Y) 13052 return X; 13053 if (!Unqualified) { 13054 if (X.isCanonical()) 13055 return X; 13056 if (Y.isCanonical()) 13057 return Y; 13058 } 13059 13060 SplitQualType SX = X.split(), SY = Y.split(); 13061 Qualifiers QX, QY; 13062 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys, 13063 // until we reach their underlying "canonical nodes". Note these are not 13064 // necessarily canonical types, as they may still have sugared properties. 13065 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively. 13066 auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY); 13067 if (SX.Ty != SY.Ty) { 13068 // The canonical nodes differ. Build a common canonical node out of the two, 13069 // unifying their sugar. This may recurse back here. 13070 SX.Ty = 13071 ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr(); 13072 } else { 13073 // The canonical nodes were identical: We may have desugared too much. 13074 // Add any common sugar back in. 13075 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) { 13076 QX -= SX.Quals; 13077 QY -= SY.Quals; 13078 SX = Xs.pop_back_val(); 13079 SY = Ys.pop_back_val(); 13080 } 13081 } 13082 if (Unqualified) 13083 QX = Qualifiers::removeCommonQualifiers(QX, QY); 13084 else 13085 assert(QX == QY); 13086 13087 // Even though the remaining sugar nodes in Xs and Ys differ, some may be 13088 // related. Walk up these nodes, unifying them and adding the result. 13089 while (!Xs.empty() && !Ys.empty()) { 13090 auto Underlying = SplitQualType( 13091 SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals)); 13092 SX = Xs.pop_back_val(); 13093 SY = Ys.pop_back_val(); 13094 SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying) 13095 .getTypePtrOrNull(); 13096 // Stop at the first pair which is unrelated. 13097 if (!SX.Ty) { 13098 SX.Ty = Underlying.Ty; 13099 break; 13100 } 13101 QX -= Underlying.Quals; 13102 }; 13103 13104 // Add back the missing accumulated qualifiers, which were stripped off 13105 // with the sugar nodes we could not unify. 13106 QualType R = getQualifiedType(SX.Ty, QX); 13107 assert(Unqualified ? 
hasSameUnqualifiedType(R, X) : hasSameType(R, X)); 13108 return R; 13109 } 13110 13111 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 13112 assert(Ty->isFixedPointType()); 13113 13114 if (Ty->isSaturatedFixedPointType()) return Ty; 13115 13116 switch (Ty->castAs<BuiltinType>()->getKind()) { 13117 default: 13118 llvm_unreachable("Not a fixed point type!"); 13119 case BuiltinType::ShortAccum: 13120 return SatShortAccumTy; 13121 case BuiltinType::Accum: 13122 return SatAccumTy; 13123 case BuiltinType::LongAccum: 13124 return SatLongAccumTy; 13125 case BuiltinType::UShortAccum: 13126 return SatUnsignedShortAccumTy; 13127 case BuiltinType::UAccum: 13128 return SatUnsignedAccumTy; 13129 case BuiltinType::ULongAccum: 13130 return SatUnsignedLongAccumTy; 13131 case BuiltinType::ShortFract: 13132 return SatShortFractTy; 13133 case BuiltinType::Fract: 13134 return SatFractTy; 13135 case BuiltinType::LongFract: 13136 return SatLongFractTy; 13137 case BuiltinType::UShortFract: 13138 return SatUnsignedShortFractTy; 13139 case BuiltinType::UFract: 13140 return SatUnsignedFractTy; 13141 case BuiltinType::ULongFract: 13142 return SatUnsignedLongFractTy; 13143 } 13144 } 13145 13146 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 13147 if (LangOpts.OpenCL) 13148 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 13149 13150 if (LangOpts.CUDA) 13151 return getTargetInfo().getCUDABuiltinAddressSpace(AS); 13152 13153 return getLangASFromTargetAS(AS); 13154 } 13155 13156 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 13157 // doesn't include ASTContext.h 13158 template 13159 clang::LazyGenerationalUpdatePtr< 13160 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 13161 clang::LazyGenerationalUpdatePtr< 13162 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 13163 const clang::ASTContext &Ctx, Decl *Value); 13164 13165 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 13166 assert(Ty->isFixedPointType()); 13167 13168 const TargetInfo &Target = getTargetInfo(); 13169 switch (Ty->castAs<BuiltinType>()->getKind()) { 13170 default: 13171 llvm_unreachable("Not a fixed point type!"); 13172 case BuiltinType::ShortAccum: 13173 case BuiltinType::SatShortAccum: 13174 return Target.getShortAccumScale(); 13175 case BuiltinType::Accum: 13176 case BuiltinType::SatAccum: 13177 return Target.getAccumScale(); 13178 case BuiltinType::LongAccum: 13179 case BuiltinType::SatLongAccum: 13180 return Target.getLongAccumScale(); 13181 case BuiltinType::UShortAccum: 13182 case BuiltinType::SatUShortAccum: 13183 return Target.getUnsignedShortAccumScale(); 13184 case BuiltinType::UAccum: 13185 case BuiltinType::SatUAccum: 13186 return Target.getUnsignedAccumScale(); 13187 case BuiltinType::ULongAccum: 13188 case BuiltinType::SatULongAccum: 13189 return Target.getUnsignedLongAccumScale(); 13190 case BuiltinType::ShortFract: 13191 case BuiltinType::SatShortFract: 13192 return Target.getShortFractScale(); 13193 case BuiltinType::Fract: 13194 case BuiltinType::SatFract: 13195 return Target.getFractScale(); 13196 case BuiltinType::LongFract: 13197 case BuiltinType::SatLongFract: 13198 return Target.getLongFractScale(); 13199 case BuiltinType::UShortFract: 13200 case BuiltinType::SatUShortFract: 13201 return Target.getUnsignedShortFractScale(); 13202 case BuiltinType::UFract: 13203 case BuiltinType::SatUFract: 13204 return Target.getUnsignedFractScale(); 13205 case BuiltinType::ULongFract: 13206 
case BuiltinType::SatULongFract: 13207 return Target.getUnsignedLongFractScale(); 13208 } 13209 } 13210 13211 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 13212 assert(Ty->isFixedPointType()); 13213 13214 const TargetInfo &Target = getTargetInfo(); 13215 switch (Ty->castAs<BuiltinType>()->getKind()) { 13216 default: 13217 llvm_unreachable("Not a fixed point type!"); 13218 case BuiltinType::ShortAccum: 13219 case BuiltinType::SatShortAccum: 13220 return Target.getShortAccumIBits(); 13221 case BuiltinType::Accum: 13222 case BuiltinType::SatAccum: 13223 return Target.getAccumIBits(); 13224 case BuiltinType::LongAccum: 13225 case BuiltinType::SatLongAccum: 13226 return Target.getLongAccumIBits(); 13227 case BuiltinType::UShortAccum: 13228 case BuiltinType::SatUShortAccum: 13229 return Target.getUnsignedShortAccumIBits(); 13230 case BuiltinType::UAccum: 13231 case BuiltinType::SatUAccum: 13232 return Target.getUnsignedAccumIBits(); 13233 case BuiltinType::ULongAccum: 13234 case BuiltinType::SatULongAccum: 13235 return Target.getUnsignedLongAccumIBits(); 13236 case BuiltinType::ShortFract: 13237 case BuiltinType::SatShortFract: 13238 case BuiltinType::Fract: 13239 case BuiltinType::SatFract: 13240 case BuiltinType::LongFract: 13241 case BuiltinType::SatLongFract: 13242 case BuiltinType::UShortFract: 13243 case BuiltinType::SatUShortFract: 13244 case BuiltinType::UFract: 13245 case BuiltinType::SatUFract: 13246 case BuiltinType::ULongFract: 13247 case BuiltinType::SatULongFract: 13248 return 0; 13249 } 13250 } 13251 13252 llvm::FixedPointSemantics 13253 ASTContext::getFixedPointSemantics(QualType Ty) const { 13254 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 13255 "Can only get the fixed point semantics for a " 13256 "fixed point or integer type."); 13257 if (Ty->isIntegerType()) 13258 return llvm::FixedPointSemantics::GetIntegerSemantics( 13259 getIntWidth(Ty), Ty->isSignedIntegerType()); 13260 13261 bool isSigned = Ty->isSignedFixedPointType(); 13262 return llvm::FixedPointSemantics( 13263 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 13264 Ty->isSaturatedFixedPointType(), 13265 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 13266 } 13267 13268 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 13269 assert(Ty->isFixedPointType()); 13270 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 13271 } 13272 13273 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 13274 assert(Ty->isFixedPointType()); 13275 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 13276 } 13277 13278 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 13279 assert(Ty->isUnsignedFixedPointType() && 13280 "Expected unsigned fixed point type"); 13281 13282 switch (Ty->castAs<BuiltinType>()->getKind()) { 13283 case BuiltinType::UShortAccum: 13284 return ShortAccumTy; 13285 case BuiltinType::UAccum: 13286 return AccumTy; 13287 case BuiltinType::ULongAccum: 13288 return LongAccumTy; 13289 case BuiltinType::SatUShortAccum: 13290 return SatShortAccumTy; 13291 case BuiltinType::SatUAccum: 13292 return SatAccumTy; 13293 case BuiltinType::SatULongAccum: 13294 return SatLongAccumTy; 13295 case BuiltinType::UShortFract: 13296 return ShortFractTy; 13297 case BuiltinType::UFract: 13298 return FractTy; 13299 case BuiltinType::ULongFract: 13300 return LongFractTy; 13301 case BuiltinType::SatUShortFract: 13302 return SatShortFractTy; 13303 case BuiltinType::SatUFract: 13304 
return SatFractTy; 13305 case BuiltinType::SatULongFract: 13306 return SatLongFractTy; 13307 default: 13308 llvm_unreachable("Unexpected unsigned fixed point type"); 13309 } 13310 } 13311 13312 std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs( 13313 const TargetVersionAttr *TV) const { 13314 assert(TV != nullptr); 13315 llvm::SmallVector<StringRef, 8> Feats; 13316 std::vector<std::string> ResFeats; 13317 TV->getFeatures(Feats); 13318 for (auto &Feature : Feats) 13319 if (Target->validateCpuSupports(Feature.str())) 13320 ResFeats.push_back("?" + Feature.str()); 13321 return ResFeats; 13322 } 13323 13324 ParsedTargetAttr 13325 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 13326 assert(TD != nullptr); 13327 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr()); 13328 13329 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 13330 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 13331 }); 13332 return ParsedAttr; 13333 } 13334 13335 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13336 const FunctionDecl *FD) const { 13337 if (FD) 13338 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 13339 else 13340 Target->initFeatureMap(FeatureMap, getDiagnostics(), 13341 Target->getTargetOpts().CPU, 13342 Target->getTargetOpts().Features); 13343 } 13344 13345 // Fills in the supplied string map with the set of target features for the 13346 // passed in function. 13347 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13348 GlobalDecl GD) const { 13349 StringRef TargetCPU = Target->getTargetOpts().CPU; 13350 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 13351 if (const auto *TD = FD->getAttr<TargetAttr>()) { 13352 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 13353 13354 // Make a copy of the features as passed on the command line into the 13355 // beginning of the additional features from the function to override. 13356 ParsedAttr.Features.insert( 13357 ParsedAttr.Features.begin(), 13358 Target->getTargetOpts().FeaturesAsWritten.begin(), 13359 Target->getTargetOpts().FeaturesAsWritten.end()); 13360 13361 if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) 13362 TargetCPU = ParsedAttr.CPU; 13363 13364 // Now populate the feature map, first with the TargetCPU which is either 13365 // the default or a new one from the target attribute string. Then we'll use 13366 // the passed in features (FeaturesAsWritten) along with the new ones from 13367 // the attribute. 
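    // For example (hypothetical attribute, assuming an x86-like target where
    // these names are valid): __attribute__((target("arch=skylake,avx2")))
    // switches TargetCPU to "skylake" and appends "+avx2" after the
    // command-line FeaturesAsWritten, so the attribute's features win on
    // conflict.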
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    std::vector<std::string> Features;
    StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
    if (Target->getTriple().isAArch64()) {
      // TargetClones for AArch64.
      if (VersionStr != "default") {
        SmallVector<StringRef, 1> VersionFeatures;
        VersionStr.split(VersionFeatures, "+");
        for (auto &VFeature : VersionFeatures) {
          VFeature = VFeature.trim();
          Features.push_back((StringRef{"?"} + VFeature).str());
        }
      }
      Features.insert(Features.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.end());
    } else {
      if (VersionStr.startswith("arch="))
        TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back((StringRef{"+"} + VersionStr).str());
    }
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
    Feats.insert(Feats.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalize(const Decl *D) const {
  bool IsStaticVar =
      isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: static managed variables need to be externalized since they are
  // emitted as declarations in the IR and therefore cannot have internal
  // linkage. Kernels in an anonymous namespace need to be externalized to
  // avoid duplicate symbols.
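  // For instance (hypothetical CUDA/HIP snippet): a file-scope
  // 'static __device__ int Counter;' satisfies IsStaticVar &&
  // IsExplicitDeviceVar, so it may be externalized; shouldExternalize() below
  // additionally requires that the variable is ODR-used by the host (or that
  // the declaration is managed or a kernel).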
13436 return (IsStaticVar && 13437 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 13438 (D->hasAttr<CUDAGlobalAttr>() && 13439 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 13440 GVA_Internal); 13441 } 13442 13443 bool ASTContext::shouldExternalize(const Decl *D) const { 13444 return mayExternalize(D) && 13445 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 13446 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 13447 } 13448 13449 StringRef ASTContext::getCUIDHash() const { 13450 if (!CUIDHash.empty()) 13451 return CUIDHash; 13452 if (LangOpts.CUID.empty()) 13453 return StringRef(); 13454 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 13455 return CUIDHash; 13456 } 13457
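// Illustrative sketch (hypothetical helper, not used by the compiler): a
// minimal example of how a caller that already knows two types share a
// canonical type can use getCommonSugaredType to keep only the sugar common
// to both spellings. It relies only on APIs used above (hasSameType,
// getCommonSugaredType, getPointerType).
LLVM_ATTRIBUTE_UNUSED static QualType
exampleCommonPointerSugar(ASTContext &Ctx, QualType X, QualType Y) {
  // Precondition mirrored from getCommonSugaredType itself.
  assert(Ctx.hasSameType(X, Y) && "expected the same canonical type");
  // E.g. unifying 'MyInt' (a typedef of 'int') with plain 'int' drops the
  // typedef sugar; the unified type is then wrapped back into a pointer.
  return Ctx.getPointerType(Ctx.getCommonSugaredType(X, Y));
}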