//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/ProfileList.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // Users cannot attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // Users cannot attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation || TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro) we need to try and find
    // the comment at the location of the expansion, but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return first the BaseLocation to look at the
    // expansion site; the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site
    // (this can happen if the name's spelling is provided via macro argument),
    // and always to the declaration itself.
    Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
  }

  return Locations;
}

RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that the Doxygen trailing comment comes after the declaration,
      // starts on the same line, and is in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D, const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalid.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    return CommentlessRedeclChains.lookup(CanonicalD);
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
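  //
  // For each declaration below, try its candidate locations in order and cache
  // the first matching comment, both as a raw comment and as a parsed
  // FullComment keyed by the canonical declaration.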
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    if (DeclRawComments.count(D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, DeclLoc, *CommentsInThisFile)) {
        cacheRawCommentForDecl(*D, *DocComment);
        comments::FullComment *FC = DocComment->parse(*this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(), ThisDeclInfo);
  return CFC;
}

comments::FullComment *
ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *
ASTContext::getCommentForDecl(const Decl *D, const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                              \
  if (counts[Idx])                                                      \
    llvm::errs() << "    " << counts[Idx] << " " << #Name               \
                 << " types, " << sizeof(Name##Type) << " each "        \
                 << "(" << counts[Idx] * sizeof(Name##Type)             \
                 << " bytes)\n";                                        \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                       \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
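  // Each line below prints a "<declared>/<total>" pair of counters for one
  // kind of implicitly generated special member function.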
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                                SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(UUD);
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(UUD);
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Inst);
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}

void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}

ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

void ASTContext::getOverriddenMethods(
    const NamedDecl *D,
    SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
Overridden.append(OverDecls.begin(), OverDecls.end()); 1572 } 1573 1574 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1575 assert(!Import->getNextLocalImport() && 1576 "Import declaration already in the chain"); 1577 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1578 if (!FirstLocalImport) { 1579 FirstLocalImport = Import; 1580 LastLocalImport = Import; 1581 return; 1582 } 1583 1584 LastLocalImport->setNextLocalImport(Import); 1585 LastLocalImport = Import; 1586 } 1587 1588 //===----------------------------------------------------------------------===// 1589 // Type Sizing and Analysis 1590 //===----------------------------------------------------------------------===// 1591 1592 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1593 /// scalar floating point type. 1594 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { 1595 switch (T->castAs<BuiltinType>()->getKind()) { 1596 default: 1597 llvm_unreachable("Not a floating point type!"); 1598 case BuiltinType::BFloat16: 1599 return Target->getBFloat16Format(); 1600 case BuiltinType::Float16: 1601 return Target->getHalfFormat(); 1602 case BuiltinType::Half: 1603 // For HLSL, when the native half type is disabled, half will be treat as 1604 // float. 1605 if (getLangOpts().HLSL) 1606 if (getLangOpts().NativeHalfType) 1607 return Target->getHalfFormat(); 1608 else 1609 return Target->getFloatFormat(); 1610 else 1611 return Target->getHalfFormat(); 1612 case BuiltinType::Float: return Target->getFloatFormat(); 1613 case BuiltinType::Double: return Target->getDoubleFormat(); 1614 case BuiltinType::Ibm128: 1615 return Target->getIbm128Format(); 1616 case BuiltinType::LongDouble: 1617 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice) 1618 return AuxTarget->getLongDoubleFormat(); 1619 return Target->getLongDoubleFormat(); 1620 case BuiltinType::Float128: 1621 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice) 1622 return AuxTarget->getFloat128Format(); 1623 return Target->getFloat128Format(); 1624 } 1625 } 1626 1627 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { 1628 unsigned Align = Target->getCharWidth(); 1629 1630 const unsigned AlignFromAttr = D->getMaxAlignment(); 1631 if (AlignFromAttr) 1632 Align = AlignFromAttr; 1633 1634 // __attribute__((aligned)) can increase or decrease alignment 1635 // *except* on a struct or struct member, where it only increases 1636 // alignment unless 'packed' is also specified. 1637 // 1638 // It is an error for alignas to decrease alignment, so we can 1639 // ignore that possibility; Sema should diagnose it. 1640 bool UseAlignAttrOnly; 1641 if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) 1642 UseAlignAttrOnly = 1643 FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>(); 1644 else 1645 UseAlignAttrOnly = AlignFromAttr != 0; 1646 // If we're using the align attribute only, just ignore everything 1647 // else about the declaration and its type. 
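  // For illustration: given 'struct __attribute__((packed)) S { int i; };',
  // the field 'i' carries no align attribute, so with UseAlignAttrOnly set its
  // alignment stays at the char width (1 byte) rather than int's natural
  // alignment.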
1648 if (UseAlignAttrOnly) { 1649 // do nothing 1650 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) { 1651 QualType T = VD->getType(); 1652 if (const auto *RT = T->getAs<ReferenceType>()) { 1653 if (ForAlignof) 1654 T = RT->getPointeeType(); 1655 else 1656 T = getPointerType(RT->getPointeeType()); 1657 } 1658 QualType BaseT = getBaseElementType(T); 1659 if (T->isFunctionType()) 1660 Align = getTypeInfoImpl(T.getTypePtr()).Align; 1661 else if (!BaseT->isIncompleteType()) { 1662 // Adjust alignments of declarations with array type by the 1663 // large-array alignment on the target. 1664 if (const ArrayType *arrayType = getAsArrayType(T)) { 1665 unsigned MinWidth = Target->getLargeArrayMinWidth(); 1666 if (!ForAlignof && MinWidth) { 1667 if (isa<VariableArrayType>(arrayType)) 1668 Align = std::max(Align, Target->getLargeArrayAlign()); 1669 else if (isa<ConstantArrayType>(arrayType) && 1670 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType))) 1671 Align = std::max(Align, Target->getLargeArrayAlign()); 1672 } 1673 } 1674 Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); 1675 if (BaseT.getQualifiers().hasUnaligned()) 1676 Align = Target->getCharWidth(); 1677 } 1678 1679 // Ensure miminum alignment for global variables. 1680 if (const auto *VD = dyn_cast<VarDecl>(D)) 1681 if (VD->hasGlobalStorage() && !ForAlignof) { 1682 uint64_t TypeSize = 1683 !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0; 1684 Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); 1685 } 1686 1687 // Fields can be subject to extra alignment constraints, like if 1688 // the field is packed, the struct is packed, or the struct has a 1689 // a max-field-alignment constraint (#pragma pack). So calculate 1690 // the actual alignment of the field within the struct, and then 1691 // (as we're expected to) constrain that by the alignment of the type. 1692 if (const auto *Field = dyn_cast<FieldDecl>(VD)) { 1693 const RecordDecl *Parent = Field->getParent(); 1694 // We can only produce a sensible answer if the record is valid. 1695 if (!Parent->isInvalidDecl()) { 1696 const ASTRecordLayout &Layout = getASTRecordLayout(Parent); 1697 1698 // Start with the record's overall alignment. 1699 unsigned FieldAlign = toBits(Layout.getAlignment()); 1700 1701 // Use the GCD of that and the offset within the record. 1702 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex()); 1703 if (Offset > 0) { 1704 // Alignment is always a power of 2, so the GCD will be a power of 2, 1705 // which means we get to do this crazy thing instead of Euclid's. 1706 uint64_t LowBitOfOffset = Offset & (~Offset + 1); 1707 if (LowBitOfOffset < FieldAlign) 1708 FieldAlign = static_cast<unsigned>(LowBitOfOffset); 1709 } 1710 1711 Align = std::min(Align, FieldAlign); 1712 } 1713 } 1714 } 1715 1716 // Some targets have hard limitation on the maximum requestable alignment in 1717 // aligned attribute for static variables. 1718 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute(); 1719 const auto *VD = dyn_cast<VarDecl>(D); 1720 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static) 1721 Align = std::min(Align, MaxAlignedAttr); 1722 1723 return toCharUnitsFromBits(Align); 1724 } 1725 1726 CharUnits ASTContext::getExnObjectAlignment() const { 1727 return toCharUnitsFromBits(Target->getExnObjectAlignment()); 1728 } 1729 1730 // getTypeInfoDataSizeInChars - Return the size of a type, in 1731 // chars. If the type is a record, its data size is returned. 
This is 1732 // the size of the memcpy that's performed when assigning this type 1733 // using a trivial copy/move assignment operator. 1734 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1735 TypeInfoChars Info = getTypeInfoInChars(T); 1736 1737 // In C++, objects can sometimes be allocated into the tail padding 1738 // of a base-class subobject. We decide whether that's possible 1739 // during class layout, so here we can just trust the layout results. 1740 if (getLangOpts().CPlusPlus) { 1741 if (const auto *RT = T->getAs<RecordType>()) { 1742 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1743 Info.Width = layout.getDataSize(); 1744 } 1745 } 1746 1747 return Info; 1748 } 1749 1750 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1751 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1752 TypeInfoChars 1753 static getConstantArrayInfoInChars(const ASTContext &Context, 1754 const ConstantArrayType *CAT) { 1755 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1756 uint64_t Size = CAT->getSize().getZExtValue(); 1757 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1758 (uint64_t)(-1)/Size) && 1759 "Overflow in array type char size evaluation"); 1760 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1761 unsigned Align = EltInfo.Align.getQuantity(); 1762 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1763 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1764 Width = llvm::alignTo(Width, Align); 1765 return TypeInfoChars(CharUnits::fromQuantity(Width), 1766 CharUnits::fromQuantity(Align), 1767 EltInfo.AlignRequirement); 1768 } 1769 1770 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1771 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1772 return getConstantArrayInfoInChars(*this, CAT); 1773 TypeInfo Info = getTypeInfo(T); 1774 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1775 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1776 } 1777 1778 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1779 return getTypeInfoInChars(T.getTypePtr()); 1780 } 1781 1782 bool ASTContext::isPromotableIntegerType(QualType T) const { 1783 // HLSL doesn't promote all small integer types to int, it 1784 // just uses the rank-based promotion rules for all types. 1785 if (getLangOpts().HLSL) 1786 return false; 1787 1788 if (const auto *BT = T->getAs<BuiltinType>()) 1789 switch (BT->getKind()) { 1790 case BuiltinType::Bool: 1791 case BuiltinType::Char_S: 1792 case BuiltinType::Char_U: 1793 case BuiltinType::SChar: 1794 case BuiltinType::UChar: 1795 case BuiltinType::Short: 1796 case BuiltinType::UShort: 1797 case BuiltinType::WChar_S: 1798 case BuiltinType::WChar_U: 1799 case BuiltinType::Char8: 1800 case BuiltinType::Char16: 1801 case BuiltinType::Char32: 1802 return true; 1803 default: 1804 return false; 1805 } 1806 1807 // Enumerated types are promotable to their compatible integer types 1808 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). 
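  // For example, an unscoped 'enum E { A };' whose promotion type is 'int' is
  // promotable, while a scoped 'enum class F { A };' never is, which is why
  // scoped enums are rejected below.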
1809 if (const auto *ET = T->getAs<EnumType>()) { 1810 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() || 1811 ET->getDecl()->isScoped()) 1812 return false; 1813 1814 return true; 1815 } 1816 1817 return false; 1818 } 1819 1820 bool ASTContext::isAlignmentRequired(const Type *T) const { 1821 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1822 } 1823 1824 bool ASTContext::isAlignmentRequired(QualType T) const { 1825 return isAlignmentRequired(T.getTypePtr()); 1826 } 1827 1828 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1829 bool NeedsPreferredAlignment) const { 1830 // An alignment on a typedef overrides anything else. 1831 if (const auto *TT = T->getAs<TypedefType>()) 1832 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1833 return Align; 1834 1835 // If we have an (array of) complete type, we're done. 1836 T = getBaseElementType(T); 1837 if (!T->isIncompleteType()) 1838 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1839 1840 // If we had an array type, its element type might be a typedef 1841 // type with an alignment attribute. 1842 if (const auto *TT = T->getAs<TypedefType>()) 1843 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1844 return Align; 1845 1846 // Otherwise, see if the declaration of the type had an attribute. 1847 if (const auto *TT = T->getAs<TagType>()) 1848 return TT->getDecl()->getMaxAlignment(); 1849 1850 return 0; 1851 } 1852 1853 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1854 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1855 if (I != MemoizedTypeInfo.end()) 1856 return I->second; 1857 1858 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1859 TypeInfo TI = getTypeInfoImpl(T); 1860 MemoizedTypeInfo[T] = TI; 1861 return TI; 1862 } 1863 1864 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1865 /// method does not work on incomplete types. 1866 /// 1867 /// FIXME: Pointers into different addr spaces could have different sizes and 1868 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1869 /// should take a QualType, &c. 1870 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1871 uint64_t Width = 0; 1872 unsigned Align = 8; 1873 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1874 LangAS AS = LangAS::Default; 1875 switch (T->getTypeClass()) { 1876 #define TYPE(Class, Base) 1877 #define ABSTRACT_TYPE(Class, Base) 1878 #define NON_CANONICAL_TYPE(Class, Base) 1879 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1880 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1881 case Type::Class: \ 1882 assert(!T->isDependentType() && "should not see dependent types here"); \ 1883 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1884 #include "clang/AST/TypeNodes.inc" 1885 llvm_unreachable("Should not see dependent types"); 1886 1887 case Type::FunctionNoProto: 1888 case Type::FunctionProto: 1889 // GCC extension: alignof(function) = 32 bits 1890 Width = 0; 1891 Align = 32; 1892 break; 1893 1894 case Type::IncompleteArray: 1895 case Type::VariableArray: 1896 case Type::ConstantArray: { 1897 // Model non-constant sized arrays as size zero, but track the alignment. 
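    // For illustration, assuming a 32-bit 'int' with 32-bit alignment:
    // 'int[10]' gets Width = 320 and Align = 32, while a VLA such as 'int[n]'
    // keeps Width = 0 but still reports Align = 32.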
1898 uint64_t Size = 0; 1899 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1900 Size = CAT->getSize().getZExtValue(); 1901 1902 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1903 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1904 "Overflow in array type bit size evaluation"); 1905 Width = EltInfo.Width * Size; 1906 Align = EltInfo.Align; 1907 AlignRequirement = EltInfo.AlignRequirement; 1908 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1909 getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1910 Width = llvm::alignTo(Width, Align); 1911 break; 1912 } 1913 1914 case Type::ExtVector: 1915 case Type::Vector: { 1916 const auto *VT = cast<VectorType>(T); 1917 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1918 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 1919 : EltInfo.Width * VT->getNumElements(); 1920 // Enforce at least byte size and alignment. 1921 Width = std::max<unsigned>(8, Width); 1922 Align = std::max<unsigned>(8, Width); 1923 1924 // If the alignment is not a power of 2, round up to the next power of 2. 1925 // This happens for non-power-of-2 length vectors. 1926 if (Align & (Align-1)) { 1927 Align = llvm::bit_ceil(Align); 1928 Width = llvm::alignTo(Width, Align); 1929 } 1930 // Adjust the alignment based on the target max. 1931 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1932 if (TargetVectorAlign && TargetVectorAlign < Align) 1933 Align = TargetVectorAlign; 1934 if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 1935 // Adjust the alignment for fixed-length SVE vectors. This is important 1936 // for non-power-of-2 vector lengths. 1937 Align = 128; 1938 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 1939 // Adjust the alignment for fixed-length SVE predicates. 1940 Align = 16; 1941 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData) 1942 // Adjust the alignment for fixed-length RVV vectors. 1943 Align = std::min<unsigned>(64, Width); 1944 break; 1945 } 1946 1947 case Type::ConstantMatrix: { 1948 const auto *MT = cast<ConstantMatrixType>(T); 1949 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 1950 // The internal layout of a matrix value is implementation defined. 1951 // Initially be ABI compatible with arrays with respect to alignment and 1952 // size. 1953 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 1954 Align = ElementInfo.Align; 1955 break; 1956 } 1957 1958 case Type::Builtin: 1959 switch (cast<BuiltinType>(T)->getKind()) { 1960 default: llvm_unreachable("Unknown builtin type!"); 1961 case BuiltinType::Void: 1962 // GCC extension: alignof(void) = 8 bits. 
1963 Width = 0; 1964 Align = 8; 1965 break; 1966 case BuiltinType::Bool: 1967 Width = Target->getBoolWidth(); 1968 Align = Target->getBoolAlign(); 1969 break; 1970 case BuiltinType::Char_S: 1971 case BuiltinType::Char_U: 1972 case BuiltinType::UChar: 1973 case BuiltinType::SChar: 1974 case BuiltinType::Char8: 1975 Width = Target->getCharWidth(); 1976 Align = Target->getCharAlign(); 1977 break; 1978 case BuiltinType::WChar_S: 1979 case BuiltinType::WChar_U: 1980 Width = Target->getWCharWidth(); 1981 Align = Target->getWCharAlign(); 1982 break; 1983 case BuiltinType::Char16: 1984 Width = Target->getChar16Width(); 1985 Align = Target->getChar16Align(); 1986 break; 1987 case BuiltinType::Char32: 1988 Width = Target->getChar32Width(); 1989 Align = Target->getChar32Align(); 1990 break; 1991 case BuiltinType::UShort: 1992 case BuiltinType::Short: 1993 Width = Target->getShortWidth(); 1994 Align = Target->getShortAlign(); 1995 break; 1996 case BuiltinType::UInt: 1997 case BuiltinType::Int: 1998 Width = Target->getIntWidth(); 1999 Align = Target->getIntAlign(); 2000 break; 2001 case BuiltinType::ULong: 2002 case BuiltinType::Long: 2003 Width = Target->getLongWidth(); 2004 Align = Target->getLongAlign(); 2005 break; 2006 case BuiltinType::ULongLong: 2007 case BuiltinType::LongLong: 2008 Width = Target->getLongLongWidth(); 2009 Align = Target->getLongLongAlign(); 2010 break; 2011 case BuiltinType::Int128: 2012 case BuiltinType::UInt128: 2013 Width = 128; 2014 Align = Target->getInt128Align(); 2015 break; 2016 case BuiltinType::ShortAccum: 2017 case BuiltinType::UShortAccum: 2018 case BuiltinType::SatShortAccum: 2019 case BuiltinType::SatUShortAccum: 2020 Width = Target->getShortAccumWidth(); 2021 Align = Target->getShortAccumAlign(); 2022 break; 2023 case BuiltinType::Accum: 2024 case BuiltinType::UAccum: 2025 case BuiltinType::SatAccum: 2026 case BuiltinType::SatUAccum: 2027 Width = Target->getAccumWidth(); 2028 Align = Target->getAccumAlign(); 2029 break; 2030 case BuiltinType::LongAccum: 2031 case BuiltinType::ULongAccum: 2032 case BuiltinType::SatLongAccum: 2033 case BuiltinType::SatULongAccum: 2034 Width = Target->getLongAccumWidth(); 2035 Align = Target->getLongAccumAlign(); 2036 break; 2037 case BuiltinType::ShortFract: 2038 case BuiltinType::UShortFract: 2039 case BuiltinType::SatShortFract: 2040 case BuiltinType::SatUShortFract: 2041 Width = Target->getShortFractWidth(); 2042 Align = Target->getShortFractAlign(); 2043 break; 2044 case BuiltinType::Fract: 2045 case BuiltinType::UFract: 2046 case BuiltinType::SatFract: 2047 case BuiltinType::SatUFract: 2048 Width = Target->getFractWidth(); 2049 Align = Target->getFractAlign(); 2050 break; 2051 case BuiltinType::LongFract: 2052 case BuiltinType::ULongFract: 2053 case BuiltinType::SatLongFract: 2054 case BuiltinType::SatULongFract: 2055 Width = Target->getLongFractWidth(); 2056 Align = Target->getLongFractAlign(); 2057 break; 2058 case BuiltinType::BFloat16: 2059 if (Target->hasBFloat16Type()) { 2060 Width = Target->getBFloat16Width(); 2061 Align = Target->getBFloat16Align(); 2062 } else if ((getLangOpts().SYCLIsDevice || 2063 (getLangOpts().OpenMP && 2064 getLangOpts().OpenMPIsTargetDevice)) && 2065 AuxTarget->hasBFloat16Type()) { 2066 Width = AuxTarget->getBFloat16Width(); 2067 Align = AuxTarget->getBFloat16Align(); 2068 } 2069 break; 2070 case BuiltinType::Float16: 2071 case BuiltinType::Half: 2072 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2073 !getLangOpts().OpenMPIsTargetDevice) { 2074 Width = Target->getHalfWidth(); 2075 
Align = Target->getHalfAlign(); 2076 } else { 2077 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2078 "Expected OpenMP device compilation."); 2079 Width = AuxTarget->getHalfWidth(); 2080 Align = AuxTarget->getHalfAlign(); 2081 } 2082 break; 2083 case BuiltinType::Float: 2084 Width = Target->getFloatWidth(); 2085 Align = Target->getFloatAlign(); 2086 break; 2087 case BuiltinType::Double: 2088 Width = Target->getDoubleWidth(); 2089 Align = Target->getDoubleAlign(); 2090 break; 2091 case BuiltinType::Ibm128: 2092 Width = Target->getIbm128Width(); 2093 Align = Target->getIbm128Align(); 2094 break; 2095 case BuiltinType::LongDouble: 2096 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2097 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2098 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2099 Width = AuxTarget->getLongDoubleWidth(); 2100 Align = AuxTarget->getLongDoubleAlign(); 2101 } else { 2102 Width = Target->getLongDoubleWidth(); 2103 Align = Target->getLongDoubleAlign(); 2104 } 2105 break; 2106 case BuiltinType::Float128: 2107 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2108 !getLangOpts().OpenMPIsTargetDevice) { 2109 Width = Target->getFloat128Width(); 2110 Align = Target->getFloat128Align(); 2111 } else { 2112 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2113 "Expected OpenMP device compilation."); 2114 Width = AuxTarget->getFloat128Width(); 2115 Align = AuxTarget->getFloat128Align(); 2116 } 2117 break; 2118 case BuiltinType::NullPtr: 2119 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*) 2120 Width = Target->getPointerWidth(LangAS::Default); 2121 Align = Target->getPointerAlign(LangAS::Default); 2122 break; 2123 case BuiltinType::ObjCId: 2124 case BuiltinType::ObjCClass: 2125 case BuiltinType::ObjCSel: 2126 Width = Target->getPointerWidth(LangAS::Default); 2127 Align = Target->getPointerAlign(LangAS::Default); 2128 break; 2129 case BuiltinType::OCLSampler: 2130 case BuiltinType::OCLEvent: 2131 case BuiltinType::OCLClkEvent: 2132 case BuiltinType::OCLQueue: 2133 case BuiltinType::OCLReserveID: 2134 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2135 case BuiltinType::Id: 2136 #include "clang/Basic/OpenCLImageTypes.def" 2137 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2138 case BuiltinType::Id: 2139 #include "clang/Basic/OpenCLExtensionTypes.def" 2140 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 2141 Width = Target->getPointerWidth(AS); 2142 Align = Target->getPointerAlign(AS); 2143 break; 2144 // The SVE types are effectively target-specific. The length of an 2145 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2146 // of 128 bits. There is one predicate bit for each vector byte, so the 2147 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2148 // 2149 // Because the length is only known at runtime, we use a dummy value 2150 // of 0 for the static length. The alignment values are those defined 2151 // by the Procedure Call Standard for the Arm Architecture. 
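    // For example, under these rules an SVE data vector such as 'svint32_t'
    // reports Width = 0 and Align = 128, and a predicate such as 'svbool_t'
    // reports Width = 0 and Align = 16.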
2152 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2153 IsSigned, IsFP, IsBF) \ 2154 case BuiltinType::Id: \ 2155 Width = 0; \ 2156 Align = 128; \ 2157 break; 2158 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2159 case BuiltinType::Id: \ 2160 Width = 0; \ 2161 Align = 16; \ 2162 break; 2163 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \ 2164 case BuiltinType::Id: \ 2165 Width = 0; \ 2166 Align = 16; \ 2167 break; 2168 #include "clang/Basic/AArch64SVEACLETypes.def" 2169 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2170 case BuiltinType::Id: \ 2171 Width = Size; \ 2172 Align = Size; \ 2173 break; 2174 #include "clang/Basic/PPCTypes.def" 2175 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2176 IsFP, IsBF) \ 2177 case BuiltinType::Id: \ 2178 Width = 0; \ 2179 Align = ElBits; \ 2180 break; 2181 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2182 case BuiltinType::Id: \ 2183 Width = 0; \ 2184 Align = 8; \ 2185 break; 2186 #include "clang/Basic/RISCVVTypes.def" 2187 #define WASM_TYPE(Name, Id, SingletonId) \ 2188 case BuiltinType::Id: \ 2189 Width = 0; \ 2190 Align = 8; \ 2191 break; 2192 #include "clang/Basic/WebAssemblyReferenceTypes.def" 2193 } 2194 break; 2195 case Type::ObjCObjectPointer: 2196 Width = Target->getPointerWidth(LangAS::Default); 2197 Align = Target->getPointerAlign(LangAS::Default); 2198 break; 2199 case Type::BlockPointer: 2200 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace(); 2201 Width = Target->getPointerWidth(AS); 2202 Align = Target->getPointerAlign(AS); 2203 break; 2204 case Type::LValueReference: 2205 case Type::RValueReference: 2206 // alignof and sizeof should never enter this code path here, so we go 2207 // the pointer route. 2208 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace(); 2209 Width = Target->getPointerWidth(AS); 2210 Align = Target->getPointerAlign(AS); 2211 break; 2212 case Type::Pointer: 2213 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace(); 2214 Width = Target->getPointerWidth(AS); 2215 Align = Target->getPointerAlign(AS); 2216 break; 2217 case Type::MemberPointer: { 2218 const auto *MPT = cast<MemberPointerType>(T); 2219 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2220 Width = MPI.Width; 2221 Align = MPI.Align; 2222 break; 2223 } 2224 case Type::Complex: { 2225 // Complex types have the same alignment as their elements, but twice the 2226 // size. 
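    // For example, assuming a 64-bit 'double' with 64-bit alignment,
    // '_Complex double' gets Width = 128 and Align = 64.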
2227 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2228 Width = EltInfo.Width * 2; 2229 Align = EltInfo.Align; 2230 break; 2231 } 2232 case Type::ObjCObject: 2233 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2234 case Type::Adjusted: 2235 case Type::Decayed: 2236 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2237 case Type::ObjCInterface: { 2238 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2239 if (ObjCI->getDecl()->isInvalidDecl()) { 2240 Width = 8; 2241 Align = 8; 2242 break; 2243 } 2244 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2245 Width = toBits(Layout.getSize()); 2246 Align = toBits(Layout.getAlignment()); 2247 break; 2248 } 2249 case Type::BitInt: { 2250 const auto *EIT = cast<BitIntType>(T); 2251 Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()), 2252 getCharWidth(), Target->getLongLongAlign()); 2253 Width = llvm::alignTo(EIT->getNumBits(), Align); 2254 break; 2255 } 2256 case Type::Record: 2257 case Type::Enum: { 2258 const auto *TT = cast<TagType>(T); 2259 2260 if (TT->getDecl()->isInvalidDecl()) { 2261 Width = 8; 2262 Align = 8; 2263 break; 2264 } 2265 2266 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2267 const EnumDecl *ED = ET->getDecl(); 2268 TypeInfo Info = 2269 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2270 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2271 Info.Align = AttrAlign; 2272 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2273 } 2274 return Info; 2275 } 2276 2277 const auto *RT = cast<RecordType>(TT); 2278 const RecordDecl *RD = RT->getDecl(); 2279 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2280 Width = toBits(Layout.getSize()); 2281 Align = toBits(Layout.getAlignment()); 2282 AlignRequirement = RD->hasAttr<AlignedAttr>() 2283 ? AlignRequirementKind::RequiredByRecord 2284 : AlignRequirementKind::None; 2285 break; 2286 } 2287 2288 case Type::SubstTemplateTypeParm: 2289 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2290 getReplacementType().getTypePtr()); 2291 2292 case Type::Auto: 2293 case Type::DeducedTemplateSpecialization: { 2294 const auto *A = cast<DeducedType>(T); 2295 assert(!A->getDeducedType().isNull() && 2296 "cannot request the size of an undeduced or dependent auto type"); 2297 return getTypeInfo(A->getDeducedType().getTypePtr()); 2298 } 2299 2300 case Type::Paren: 2301 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2302 2303 case Type::MacroQualified: 2304 return getTypeInfo( 2305 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2306 2307 case Type::ObjCTypeParam: 2308 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2309 2310 case Type::Using: 2311 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2312 2313 case Type::Typedef: { 2314 const auto *TT = cast<TypedefType>(T); 2315 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr()); 2316 // If the typedef has an aligned attribute on it, it overrides any computed 2317 // alignment we have. This violates the GCC documentation (which says that 2318 // attribute(aligned) can only round up) but matches its implementation. 
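    // For example, 'typedef double D __attribute__((aligned(2)));' yields an
    // alignment of 2 bytes for 'D' here, even though that is below double's
    // natural alignment on most targets.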
2319 if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) { 2320 Align = AttrAlign; 2321 AlignRequirement = AlignRequirementKind::RequiredByTypedef; 2322 } else { 2323 Align = Info.Align; 2324 AlignRequirement = Info.AlignRequirement; 2325 } 2326 Width = Info.Width; 2327 break; 2328 } 2329 2330 case Type::Elaborated: 2331 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr()); 2332 2333 case Type::Attributed: 2334 return getTypeInfo( 2335 cast<AttributedType>(T)->getEquivalentType().getTypePtr()); 2336 2337 case Type::BTFTagAttributed: 2338 return getTypeInfo( 2339 cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr()); 2340 2341 case Type::Atomic: { 2342 // Start with the base type information. 2343 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType()); 2344 Width = Info.Width; 2345 Align = Info.Align; 2346 2347 if (!Width) { 2348 // An otherwise zero-sized type should still generate an 2349 // atomic operation. 2350 Width = Target->getCharWidth(); 2351 assert(Align); 2352 } else if (Width <= Target->getMaxAtomicPromoteWidth()) { 2353 // If the size of the type doesn't exceed the platform's max 2354 // atomic promotion width, make the size and alignment more 2355 // favorable to atomic operations: 2356 2357 // Round the size up to a power of 2. 2358 Width = llvm::bit_ceil(Width); 2359 2360 // Set the alignment equal to the size. 2361 Align = static_cast<unsigned>(Width); 2362 } 2363 } 2364 break; 2365 2366 case Type::Pipe: 2367 Width = Target->getPointerWidth(LangAS::opencl_global); 2368 Align = Target->getPointerAlign(LangAS::opencl_global); 2369 break; 2370 } 2371 2372 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); 2373 return TypeInfo(Width, Align, AlignRequirement); 2374 } 2375 2376 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const { 2377 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T); 2378 if (I != MemoizedUnadjustedAlign.end()) 2379 return I->second; 2380 2381 unsigned UnadjustedAlign; 2382 if (const auto *RT = T->getAs<RecordType>()) { 2383 const RecordDecl *RD = RT->getDecl(); 2384 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2385 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment()); 2386 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) { 2387 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2388 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment()); 2389 } else { 2390 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType()); 2391 } 2392 2393 MemoizedUnadjustedAlign[T] = UnadjustedAlign; 2394 return UnadjustedAlign; 2395 } 2396 2397 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const { 2398 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign( 2399 getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap); 2400 return SimdAlign; 2401 } 2402 2403 /// toCharUnitsFromBits - Convert a size in bits to a size in characters. 2404 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const { 2405 return CharUnits::fromQuantity(BitSize / getCharWidth()); 2406 } 2407 2408 /// toBits - Convert a size in characters to a size in characters. 2409 int64_t ASTContext::toBits(CharUnits CharSize) const { 2410 return CharSize.getQuantity() * getCharWidth(); 2411 } 2412 2413 /// getTypeSizeInChars - Return the size of the specified type, in characters. 2414 /// This method does not work on incomplete types. 
2415 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2416 return getTypeInfoInChars(T).Width; 2417 } 2418 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2419 return getTypeInfoInChars(T).Width; 2420 } 2421 2422 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2423 /// characters. This method does not work on incomplete types. 2424 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2425 return toCharUnitsFromBits(getTypeAlign(T)); 2426 } 2427 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2428 return toCharUnitsFromBits(getTypeAlign(T)); 2429 } 2430 2431 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2432 /// type, in characters, before alignment adjustments. This method does 2433 /// not work on incomplete types. 2434 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2435 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2436 } 2437 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2438 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2439 } 2440 2441 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2442 /// type for the current target in bits. This can be different than the ABI 2443 /// alignment in cases where it is beneficial for performance or backwards 2444 /// compatibility preserving to overalign a data type. (Note: despite the name, 2445 /// the preferred alignment is ABI-impacting, and not an optimization.) 2446 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { 2447 TypeInfo TI = getTypeInfo(T); 2448 unsigned ABIAlign = TI.Align; 2449 2450 T = T->getBaseElementTypeUnsafe(); 2451 2452 // The preferred alignment of member pointers is that of a pointer. 2453 if (T->isMemberPointerType()) 2454 return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); 2455 2456 if (!Target->allowsLargerPreferedTypeAlignment()) 2457 return ABIAlign; 2458 2459 if (const auto *RT = T->getAs<RecordType>()) { 2460 const RecordDecl *RD = RT->getDecl(); 2461 2462 // When used as part of a typedef, or together with a 'packed' attribute, 2463 // the 'aligned' attribute can be used to decrease alignment. Note that the 2464 // 'packed' case is already taken into consideration when computing the 2465 // alignment, we only need to handle the typedef case here. 2466 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef || 2467 RD->isInvalidDecl()) 2468 return ABIAlign; 2469 2470 unsigned PreferredAlign = static_cast<unsigned>( 2471 toBits(getASTRecordLayout(RD).PreferredAlignment)); 2472 assert(PreferredAlign >= ABIAlign && 2473 "PreferredAlign should be at least as large as ABIAlign."); 2474 return PreferredAlign; 2475 } 2476 2477 // Double (and, for targets supporting AIX `power` alignment, long double) and 2478 // long long should be naturally aligned (despite requiring less alignment) if 2479 // possible. 2480 if (const auto *CT = T->getAs<ComplexType>()) 2481 T = CT->getElementType().getTypePtr(); 2482 if (const auto *ET = T->getAs<EnumType>()) 2483 T = ET->getDecl()->getIntegerType().getTypePtr(); 2484 if (T->isSpecificBuiltinType(BuiltinType::Double) || 2485 T->isSpecificBuiltinType(BuiltinType::LongLong) || 2486 T->isSpecificBuiltinType(BuiltinType::ULongLong) || 2487 (T->isSpecificBuiltinType(BuiltinType::LongDouble) && 2488 Target->defaultsToAIXPowerAlignment())) 2489 // Don't increase the alignment if an alignment attribute was specified on a 2490 // typedef declaration. 
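    // For example, on typical i386 System V targets 'long long' has a 32-bit
    // ABI alignment but a 64-bit preferred alignment, so __alignof__ reports
    // 8 bytes while _Alignof reports 4.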
2491 if (!TI.isAlignRequired()) 2492 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2493 2494 return ABIAlign; 2495 } 2496 2497 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2498 /// for __attribute__((aligned)) on this target, to be used if no alignment 2499 /// value is specified. 2500 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2501 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2502 } 2503 2504 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2505 /// to a global variable of the specified type. 2506 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2507 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2508 return std::max(getPreferredTypeAlign(T), 2509 getTargetInfo().getMinGlobalAlign(TypeSize)); 2510 } 2511 2512 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2513 /// should be given to a global variable of the specified type. 2514 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2515 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2516 } 2517 2518 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2519 CharUnits Offset = CharUnits::Zero(); 2520 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2521 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2522 Offset += Layout->getBaseClassOffset(Base); 2523 Layout = &getASTRecordLayout(Base); 2524 } 2525 return Offset; 2526 } 2527 2528 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2529 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2530 CharUnits ThisAdjustment = CharUnits::Zero(); 2531 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2532 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2533 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2534 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2535 const CXXRecordDecl *Base = RD; 2536 const CXXRecordDecl *Derived = Path[I]; 2537 if (DerivedMember) 2538 std::swap(Base, Derived); 2539 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2540 RD = Path[I]; 2541 } 2542 if (DerivedMember) 2543 ThisAdjustment = -ThisAdjustment; 2544 return ThisAdjustment; 2545 } 2546 2547 /// DeepCollectObjCIvars - 2548 /// This routine first collects all declared, but not synthesized, ivars in 2549 /// super class and then collects all ivars, including those synthesized for 2550 /// current class. This routine is used for implementation of current class 2551 /// when all ivars, declared and synthesized are known. 2552 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2553 bool leafClass, 2554 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2555 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2556 DeepCollectObjCIvars(SuperClass, false, Ivars); 2557 if (!leafClass) { 2558 llvm::append_range(Ivars, OI->ivars()); 2559 } else { 2560 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2561 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2562 Iv= Iv->getNextIvar()) 2563 Ivars.push_back(Iv); 2564 } 2565 } 2566 2567 /// CollectInheritedProtocols - Collect all protocols in current class and 2568 /// those inherited by it. 
2569 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2570 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2571 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2572 // We can use protocol_iterator here instead of 2573 // all_referenced_protocol_iterator since we are walking all categories. 2574 for (auto *Proto : OI->all_referenced_protocols()) { 2575 CollectInheritedProtocols(Proto, Protocols); 2576 } 2577 2578 // Categories of this Interface. 2579 for (const auto *Cat : OI->visible_categories()) 2580 CollectInheritedProtocols(Cat, Protocols); 2581 2582 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2583 while (SD) { 2584 CollectInheritedProtocols(SD, Protocols); 2585 SD = SD->getSuperClass(); 2586 } 2587 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2588 for (auto *Proto : OC->protocols()) { 2589 CollectInheritedProtocols(Proto, Protocols); 2590 } 2591 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2592 // Insert the protocol. 2593 if (!Protocols.insert( 2594 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2595 return; 2596 2597 for (auto *Proto : OP->protocols()) 2598 CollectInheritedProtocols(Proto, Protocols); 2599 } 2600 } 2601 2602 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2603 const RecordDecl *RD, 2604 bool CheckIfTriviallyCopyable) { 2605 assert(RD->isUnion() && "Must be union type"); 2606 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2607 2608 for (const auto *Field : RD->fields()) { 2609 if (!Context.hasUniqueObjectRepresentations(Field->getType(), 2610 CheckIfTriviallyCopyable)) 2611 return false; 2612 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2613 if (FieldSize != UnionSize) 2614 return false; 2615 } 2616 return !RD->field_empty(); 2617 } 2618 2619 static int64_t getSubobjectOffset(const FieldDecl *Field, 2620 const ASTContext &Context, 2621 const clang::ASTRecordLayout & /*Layout*/) { 2622 return Context.getFieldOffset(Field); 2623 } 2624 2625 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2626 const ASTContext &Context, 2627 const clang::ASTRecordLayout &Layout) { 2628 return Context.toBits(Layout.getBaseClassOffset(RD)); 2629 } 2630 2631 static std::optional<int64_t> 2632 structHasUniqueObjectRepresentations(const ASTContext &Context, 2633 const RecordDecl *RD, 2634 bool CheckIfTriviallyCopyable); 2635 2636 static std::optional<int64_t> 2637 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, 2638 bool CheckIfTriviallyCopyable) { 2639 if (Field->getType()->isRecordType()) { 2640 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2641 if (!RD->isUnion()) 2642 return structHasUniqueObjectRepresentations(Context, RD, 2643 CheckIfTriviallyCopyable); 2644 } 2645 2646 // A _BitInt type may not be unique if it has padding bits 2647 // but if it is a bitfield the padding bits are not used. 2648 bool IsBitIntType = Field->getType()->isBitIntType(); 2649 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2650 !Context.hasUniqueObjectRepresentations(Field->getType(), 2651 CheckIfTriviallyCopyable)) 2652 return std::nullopt; 2653 2654 int64_t FieldSizeInBits = 2655 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2656 if (Field->isBitField()) { 2657 // If we have explicit padding bits, they don't contribute bits 2658 // to the actual object representation, so return 0. 
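    // For example, in 'struct S { int a : 3; int : 29; };' the unnamed 29-bit
    // field is such explicit padding and contributes zero bits here.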
2659 if (Field->isUnnamedBitfield()) 2660 return 0; 2661 2662 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2663 if (IsBitIntType) { 2664 if ((unsigned)BitfieldSize > 2665 cast<BitIntType>(Field->getType())->getNumBits()) 2666 return std::nullopt; 2667 } else if (BitfieldSize > FieldSizeInBits) { 2668 return std::nullopt; 2669 } 2670 FieldSizeInBits = BitfieldSize; 2671 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations( 2672 Field->getType(), CheckIfTriviallyCopyable)) { 2673 return std::nullopt; 2674 } 2675 return FieldSizeInBits; 2676 } 2677 2678 static std::optional<int64_t> 2679 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context, 2680 bool CheckIfTriviallyCopyable) { 2681 return structHasUniqueObjectRepresentations(Context, RD, 2682 CheckIfTriviallyCopyable); 2683 } 2684 2685 template <typename RangeT> 2686 static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2687 const RangeT &Subobjects, int64_t CurOffsetInBits, 2688 const ASTContext &Context, const clang::ASTRecordLayout &Layout, 2689 bool CheckIfTriviallyCopyable) { 2690 for (const auto *Subobject : Subobjects) { 2691 std::optional<int64_t> SizeInBits = 2692 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable); 2693 if (!SizeInBits) 2694 return std::nullopt; 2695 if (*SizeInBits != 0) { 2696 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2697 if (Offset != CurOffsetInBits) 2698 return std::nullopt; 2699 CurOffsetInBits += *SizeInBits; 2700 } 2701 } 2702 return CurOffsetInBits; 2703 } 2704 2705 static std::optional<int64_t> 2706 structHasUniqueObjectRepresentations(const ASTContext &Context, 2707 const RecordDecl *RD, 2708 bool CheckIfTriviallyCopyable) { 2709 assert(!RD->isUnion() && "Must be struct/class type"); 2710 const auto &Layout = Context.getASTRecordLayout(RD); 2711 2712 int64_t CurOffsetInBits = 0; 2713 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2714 if (ClassDecl->isDynamicClass()) 2715 return std::nullopt; 2716 2717 SmallVector<CXXRecordDecl *, 4> Bases; 2718 for (const auto &Base : ClassDecl->bases()) { 2719 // Empty types can be inherited from, and non-empty types can potentially 2720 // have tail padding, so just make sure there isn't an error. 
2721 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2722 } 2723 2724 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2725 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2726 }); 2727 2728 std::optional<int64_t> OffsetAfterBases = 2729 structSubobjectsHaveUniqueObjectRepresentations( 2730 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable); 2731 if (!OffsetAfterBases) 2732 return std::nullopt; 2733 CurOffsetInBits = *OffsetAfterBases; 2734 } 2735 2736 std::optional<int64_t> OffsetAfterFields = 2737 structSubobjectsHaveUniqueObjectRepresentations( 2738 RD->fields(), CurOffsetInBits, Context, Layout, 2739 CheckIfTriviallyCopyable); 2740 if (!OffsetAfterFields) 2741 return std::nullopt; 2742 CurOffsetInBits = *OffsetAfterFields; 2743 2744 return CurOffsetInBits; 2745 } 2746 2747 bool ASTContext::hasUniqueObjectRepresentations( 2748 QualType Ty, bool CheckIfTriviallyCopyable) const { 2749 // C++17 [meta.unary.prop]: 2750 // The predicate condition for a template specialization 2751 // has_unique_object_representations<T> shall be satisfied if and only if: 2752 // (9.1) - T is trivially copyable, and 2753 // (9.2) - any two objects of type T with the same value have the same 2754 // object representation, where: 2755 // - two objects of array or non-union class type are considered to have 2756 // the same value if their respective sequences of direct subobjects 2757 // have the same values, and 2758 // - two objects of union type are considered to have the same value if 2759 // they have the same active member and the corresponding members have 2760 // the same value. 2761 // The set of scalar types for which this condition holds is 2762 // implementation-defined. [ Note: If a type has padding bits, the condition 2763 // does not hold; otherwise, the condition holds true for unsigned integral 2764 // types. -- end note ] 2765 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2766 2767 // Arrays are unique only if their element type is unique. 2768 if (Ty->isArrayType()) 2769 return hasUniqueObjectRepresentations(getBaseElementType(Ty), 2770 CheckIfTriviallyCopyable); 2771 2772 // (9.1) - T is trivially copyable... 2773 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this)) 2774 return false; 2775 2776 // All integrals and enums are unique. 2777 if (Ty->isIntegralOrEnumerationType()) { 2778 // Except _BitInt types that have padding bits. 2779 if (const auto *BIT = Ty->getAs<BitIntType>()) 2780 return getTypeSize(BIT) == BIT->getNumBits(); 2781 2782 return true; 2783 } 2784 2785 // All other pointers are unique. 
2786 if (Ty->isPointerType()) 2787 return true; 2788 2789 if (const auto *MPT = Ty->getAs<MemberPointerType>()) 2790 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2791 2792 if (Ty->isRecordType()) { 2793 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2794 2795 if (Record->isInvalidDecl()) 2796 return false; 2797 2798 if (Record->isUnion()) 2799 return unionHasUniqueObjectRepresentations(*this, Record, 2800 CheckIfTriviallyCopyable); 2801 2802 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations( 2803 *this, Record, CheckIfTriviallyCopyable); 2804 2805 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2806 } 2807 2808 // FIXME: More cases to handle here (list by rsmith): 2809 // vectors (careful about, eg, vector of 3 foo) 2810 // _Complex int and friends 2811 // _Atomic T 2812 // Obj-C block pointers 2813 // Obj-C object pointers 2814 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2815 // clk_event_t, queue_t, reserve_id_t) 2816 // There're also Obj-C class types and the Obj-C selector type, but I think it 2817 // makes sense for those to return false here. 2818 2819 return false; 2820 } 2821 2822 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2823 unsigned count = 0; 2824 // Count ivars declared in class extension. 2825 for (const auto *Ext : OI->known_extensions()) 2826 count += Ext->ivar_size(); 2827 2828 // Count ivar defined in this class's implementation. This 2829 // includes synthesized ivars. 2830 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2831 count += ImplDecl->ivar_size(); 2832 2833 return count; 2834 } 2835 2836 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2837 if (!E) 2838 return false; 2839 2840 // nullptr_t is always treated as null. 2841 if (E->getType()->isNullPtrType()) return true; 2842 2843 if (E->getType()->isAnyPointerType() && 2844 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2845 Expr::NPC_ValueDependentIsNull)) 2846 return true; 2847 2848 // Unfortunately, __null has type 'int'. 2849 if (isa<GNUNullExpr>(E)) return true; 2850 2851 return false; 2852 } 2853 2854 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2855 /// exists. 2856 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2857 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2858 I = ObjCImpls.find(D); 2859 if (I != ObjCImpls.end()) 2860 return cast<ObjCImplementationDecl>(I->second); 2861 return nullptr; 2862 } 2863 2864 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2865 /// exists. 2866 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2867 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2868 I = ObjCImpls.find(D); 2869 if (I != ObjCImpls.end()) 2870 return cast<ObjCCategoryImplDecl>(I->second); 2871 return nullptr; 2872 } 2873 2874 /// Set the implementation of ObjCInterfaceDecl. 2875 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2876 ObjCImplementationDecl *ImplD) { 2877 assert(IFaceD && ImplD && "Passed null params"); 2878 ObjCImpls[IFaceD] = ImplD; 2879 } 2880 2881 /// Set the implementation of ObjCCategoryDecl. 
2882 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2883 ObjCCategoryImplDecl *ImplD) { 2884 assert(CatD && ImplD && "Passed null params"); 2885 ObjCImpls[CatD] = ImplD; 2886 } 2887 2888 const ObjCMethodDecl * 2889 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2890 return ObjCMethodRedecls.lookup(MD); 2891 } 2892 2893 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2894 const ObjCMethodDecl *Redecl) { 2895 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2896 ObjCMethodRedecls[MD] = Redecl; 2897 } 2898 2899 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2900 const NamedDecl *ND) const { 2901 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2902 return ID; 2903 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2904 return CD->getClassInterface(); 2905 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2906 return IMD->getClassInterface(); 2907 2908 return nullptr; 2909 } 2910 2911 /// Get the copy initialization expression of VarDecl, or nullptr if 2912 /// none exists. 2913 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2914 assert(VD && "Passed null params"); 2915 assert(VD->hasAttr<BlocksAttr>() && 2916 "getBlockVarCopyInits - not __block var"); 2917 auto I = BlockVarCopyInits.find(VD); 2918 if (I != BlockVarCopyInits.end()) 2919 return I->second; 2920 return {nullptr, false}; 2921 } 2922 2923 /// Set the copy initialization expression of a block var decl. 2924 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2925 bool CanThrow) { 2926 assert(VD && CopyExpr && "Passed null params"); 2927 assert(VD->hasAttr<BlocksAttr>() && 2928 "setBlockVarCopyInits - not __block var"); 2929 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2930 } 2931 2932 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2933 unsigned DataSize) const { 2934 if (!DataSize) 2935 DataSize = TypeLoc::getFullDataSizeForType(T); 2936 else 2937 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2938 "incorrect data size provided to CreateTypeSourceInfo!"); 2939 2940 auto *TInfo = 2941 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2942 new (TInfo) TypeSourceInfo(T, DataSize); 2943 return TInfo; 2944 } 2945 2946 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2947 SourceLocation L) const { 2948 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2949 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2950 return DI; 2951 } 2952 2953 const ASTRecordLayout & 2954 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2955 return getObjCLayout(D, nullptr); 2956 } 2957 2958 const ASTRecordLayout & 2959 ASTContext::getASTObjCImplementationLayout( 2960 const ObjCImplementationDecl *D) const { 2961 return getObjCLayout(D->getClassInterface(), D); 2962 } 2963 2964 static auto getCanonicalTemplateArguments(const ASTContext &C, 2965 ArrayRef<TemplateArgument> Args, 2966 bool &AnyNonCanonArgs) { 2967 SmallVector<TemplateArgument, 16> CanonArgs(Args); 2968 for (auto &Arg : CanonArgs) { 2969 TemplateArgument OrigArg = Arg; 2970 Arg = C.getCanonicalTemplateArgument(Arg); 2971 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); 2972 } 2973 return CanonArgs; 2974 } 2975 2976 //===----------------------------------------------------------------------===// 2977 // Type creation/memoization methods 2978 
//===----------------------------------------------------------------------===// 2979 2980 QualType 2981 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2982 unsigned fastQuals = quals.getFastQualifiers(); 2983 quals.removeFastQualifiers(); 2984 2985 // Check if we've already instantiated this type. 2986 llvm::FoldingSetNodeID ID; 2987 ExtQuals::Profile(ID, baseType, quals); 2988 void *insertPos = nullptr; 2989 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 2990 assert(eq->getQualifiers() == quals); 2991 return QualType(eq, fastQuals); 2992 } 2993 2994 // If the base type is not canonical, make the appropriate canonical type. 2995 QualType canon; 2996 if (!baseType->isCanonicalUnqualified()) { 2997 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 2998 canonSplit.Quals.addConsistentQualifiers(quals); 2999 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3000 3001 // Re-find the insert position. 3002 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3003 } 3004 3005 auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals); 3006 ExtQualNodes.InsertNode(eq, insertPos); 3007 return QualType(eq, fastQuals); 3008 } 3009 3010 QualType ASTContext::getAddrSpaceQualType(QualType T, 3011 LangAS AddressSpace) const { 3012 QualType CanT = getCanonicalType(T); 3013 if (CanT.getAddressSpace() == AddressSpace) 3014 return T; 3015 3016 // If we are composing extended qualifiers together, merge together 3017 // into one ExtQuals node. 3018 QualifierCollector Quals; 3019 const Type *TypeNode = Quals.strip(T); 3020 3021 // If this type already has an address space specified, it cannot get 3022 // another one. 3023 assert(!Quals.hasAddressSpace() && 3024 "Type cannot be in multiple addr spaces!"); 3025 Quals.addAddressSpace(AddressSpace); 3026 3027 return getExtQualType(TypeNode, Quals); 3028 } 3029 3030 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3031 // If the type is not qualified with an address space, just return it 3032 // immediately. 3033 if (!T.hasAddressSpace()) 3034 return T; 3035 3036 // If we are composing extended qualifiers together, merge together 3037 // into one ExtQuals node. 3038 QualifierCollector Quals; 3039 const Type *TypeNode; 3040 3041 while (T.hasAddressSpace()) { 3042 TypeNode = Quals.strip(T); 3043 3044 // If the type no longer has an address space after stripping qualifiers, 3045 // jump out. 3046 if (!QualType(TypeNode, 0).hasAddressSpace()) 3047 break; 3048 3049 // There might be sugar in the way. Strip it and try again. 3050 T = T.getSingleStepDesugaredType(*this); 3051 } 3052 3053 Quals.removeAddressSpace(); 3054 3055 // Removal of the address space can mean there are no longer any 3056 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3057 // or required. 
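  // For example, stripping the address space from an OpenCL '__global const
  // int' leaves only 'const', a fast qualifier, so the branch below returns a
  // plain QualType rather than building an ExtQuals node.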
3058 if (Quals.hasNonFastQualifiers()) 3059 return getExtQualType(TypeNode, Quals); 3060 else 3061 return QualType(TypeNode, Quals.getFastQualifiers()); 3062 } 3063 3064 QualType ASTContext::getObjCGCQualType(QualType T, 3065 Qualifiers::GC GCAttr) const { 3066 QualType CanT = getCanonicalType(T); 3067 if (CanT.getObjCGCAttr() == GCAttr) 3068 return T; 3069 3070 if (const auto *ptr = T->getAs<PointerType>()) { 3071 QualType Pointee = ptr->getPointeeType(); 3072 if (Pointee->isAnyPointerType()) { 3073 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3074 return getPointerType(ResultType); 3075 } 3076 } 3077 3078 // If we are composing extended qualifiers together, merge together 3079 // into one ExtQuals node. 3080 QualifierCollector Quals; 3081 const Type *TypeNode = Quals.strip(T); 3082 3083 // If this type already has an ObjCGC specified, it cannot get 3084 // another one. 3085 assert(!Quals.hasObjCGCAttr() && 3086 "Type cannot have multiple ObjCGCs!"); 3087 Quals.addObjCGCAttr(GCAttr); 3088 3089 return getExtQualType(TypeNode, Quals); 3090 } 3091 3092 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3093 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3094 QualType Pointee = Ptr->getPointeeType(); 3095 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3096 return getPointerType(removeAddrSpaceQualType(Pointee)); 3097 } 3098 } 3099 return T; 3100 } 3101 3102 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3103 FunctionType::ExtInfo Info) { 3104 if (T->getExtInfo() == Info) 3105 return T; 3106 3107 QualType Result; 3108 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3109 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3110 } else { 3111 const auto *FPT = cast<FunctionProtoType>(T); 3112 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3113 EPI.ExtInfo = Info; 3114 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3115 } 3116 3117 return cast<FunctionType>(Result.getTypePtr()); 3118 } 3119 3120 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3121 QualType ResultType) { 3122 FD = FD->getMostRecentDecl(); 3123 while (true) { 3124 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3125 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3126 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3127 if (FunctionDecl *Next = FD->getPreviousDecl()) 3128 FD = Next; 3129 else 3130 break; 3131 } 3132 if (ASTMutationListener *L = getASTMutationListener()) 3133 L->DeducedReturnType(FD, ResultType); 3134 } 3135 3136 /// Get a function type and produce the equivalent function type with the 3137 /// specified exception specification. Type sugar that can be present on a 3138 /// declaration of a function with an exception specification is permitted 3139 /// and preserved. Other type sugar (for instance, typedefs) is not. 3140 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3141 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3142 // Might have some parens. 3143 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3144 return getParenType( 3145 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3146 3147 // Might be wrapped in a macro qualified type. 
3148 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3149 return getMacroQualifiedType( 3150 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3151 MQT->getMacroIdentifier()); 3152 3153 // Might have a calling-convention attribute. 3154 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3155 return getAttributedType( 3156 AT->getAttrKind(), 3157 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3158 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3159 3160 // Anything else must be a function type. Rebuild it with the new exception 3161 // specification. 3162 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3163 return getFunctionType( 3164 Proto->getReturnType(), Proto->getParamTypes(), 3165 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3166 } 3167 3168 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3169 QualType U) const { 3170 return hasSameType(T, U) || 3171 (getLangOpts().CPlusPlus17 && 3172 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3173 getFunctionTypeWithExceptionSpec(U, EST_None))); 3174 } 3175 3176 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3177 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3178 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3179 SmallVector<QualType, 16> Args(Proto->param_types().size()); 3180 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3181 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]); 3182 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3183 } 3184 3185 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3186 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3187 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3188 } 3189 3190 return T; 3191 } 3192 3193 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3194 return hasSameType(T, U) || 3195 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3196 getFunctionTypeWithoutPtrSizes(U)); 3197 } 3198 3199 void ASTContext::adjustExceptionSpec( 3200 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3201 bool AsWritten) { 3202 // Update the type. 3203 QualType Updated = 3204 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3205 FD->setType(Updated); 3206 3207 if (!AsWritten) 3208 return; 3209 3210 // Update the type in the type source information too. 3211 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3212 // If the type and the type-as-written differ, we may need to update 3213 // the type-as-written too. 3214 if (TSInfo->getType() != FD->getType()) 3215 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3216 3217 // FIXME: When we get proper type location information for exceptions, 3218 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3219 // up the TypeSourceInfo; 3220 assert(TypeLoc::getFullDataSizeForType(Updated) == 3221 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3222 "TypeLoc size mismatch from updating exception specification"); 3223 TSInfo->overrideType(Updated); 3224 } 3225 } 3226 3227 /// getComplexType - Return the uniqued reference to the type for a complex 3228 /// number with the specified element type. 3229 QualType ASTContext::getComplexType(QualType T) const { 3230 // Unique pointers, to guarantee there is only one pointer of a particular 3231 // structure. 
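  // This is the uniquing idiom used by most get*Type methods in this file:
  // hash the node's identity into a FoldingSetNodeID, probe the set, and
  // allocate only on a miss.  Schematically (FooType / FooTypes stand in for
  // whichever node class and folding set a given method uses):
  //
  //   llvm::FoldingSetNodeID ID;
  //   FooType::Profile(ID, Args...);          // identity of the desired node
  //   void *InsertPos = nullptr;
  //   if (FooType *Existing = FooTypes.FindNodeOrInsertPos(ID, InsertPos))
  //     return QualType(Existing, 0);         // reuse the existing node
  //   auto *New = new (*this, alignof(FooType)) FooType(Args..., Canonical);
  //   FooTypes.InsertNode(New, InsertPos);
  //   Types.push_back(New);
  //   return QualType(New, 0);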
3232 llvm::FoldingSetNodeID ID; 3233 ComplexType::Profile(ID, T); 3234 3235 void *InsertPos = nullptr; 3236 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3237 return QualType(CT, 0); 3238 3239 // If the pointee type isn't canonical, this won't be a canonical type either, 3240 // so fill in the canonical type field. 3241 QualType Canonical; 3242 if (!T.isCanonical()) { 3243 Canonical = getComplexType(getCanonicalType(T)); 3244 3245 // Get the new insert position for the node we care about. 3246 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3247 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3248 } 3249 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical); 3250 Types.push_back(New); 3251 ComplexTypes.InsertNode(New, InsertPos); 3252 return QualType(New, 0); 3253 } 3254 3255 /// getPointerType - Return the uniqued reference to the type for a pointer to 3256 /// the specified type. 3257 QualType ASTContext::getPointerType(QualType T) const { 3258 // Unique pointers, to guarantee there is only one pointer of a particular 3259 // structure. 3260 llvm::FoldingSetNodeID ID; 3261 PointerType::Profile(ID, T); 3262 3263 void *InsertPos = nullptr; 3264 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3265 return QualType(PT, 0); 3266 3267 // If the pointee type isn't canonical, this won't be a canonical type either, 3268 // so fill in the canonical type field. 3269 QualType Canonical; 3270 if (!T.isCanonical()) { 3271 Canonical = getPointerType(getCanonicalType(T)); 3272 3273 // Get the new insert position for the node we care about. 3274 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3275 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3276 } 3277 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical); 3278 Types.push_back(New); 3279 PointerTypes.InsertNode(New, InsertPos); 3280 return QualType(New, 0); 3281 } 3282 3283 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3284 llvm::FoldingSetNodeID ID; 3285 AdjustedType::Profile(ID, Orig, New); 3286 void *InsertPos = nullptr; 3287 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3288 if (AT) 3289 return QualType(AT, 0); 3290 3291 QualType Canonical = getCanonicalType(New); 3292 3293 // Get the new insert position for the node we care about. 3294 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3295 assert(!AT && "Shouldn't be in the map!"); 3296 3297 AT = new (*this, alignof(AdjustedType)) 3298 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3299 Types.push_back(AT); 3300 AdjustedTypes.InsertNode(AT, InsertPos); 3301 return QualType(AT, 0); 3302 } 3303 3304 QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const { 3305 llvm::FoldingSetNodeID ID; 3306 AdjustedType::Profile(ID, Orig, Decayed); 3307 void *InsertPos = nullptr; 3308 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3309 if (AT) 3310 return QualType(AT, 0); 3311 3312 QualType Canonical = getCanonicalType(Decayed); 3313 3314 // Get the new insert position for the node we care about. 
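  // (This re-lookup pattern recurs throughout the file: the work done while
  // building the canonical type may itself insert nodes into the folding set,
  // which can invalidate the InsertPos produced by the earlier probe, so the
  // position is recomputed before InsertNode is used.  Schematically, with
  // FooTypes standing in for the set at hand:
  //
  //   void *Pos = nullptr;
  //   FooTypes.FindNodeOrInsertPos(ID, Pos);   // initial probe
  //   QualType Canon = getCanonicalType(...);  // may touch FooTypes
  //   FooTypes.FindNodeOrInsertPos(ID, Pos);   // refresh Pos
  //   FooTypes.InsertNode(New, Pos);
  // )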
3315 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3316 assert(!AT && "Shouldn't be in the map!"); 3317 3318 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical); 3319 Types.push_back(AT); 3320 AdjustedTypes.InsertNode(AT, InsertPos); 3321 return QualType(AT, 0); 3322 } 3323 3324 QualType ASTContext::getDecayedType(QualType T) const { 3325 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3326 3327 QualType Decayed; 3328 3329 // C99 6.7.5.3p7: 3330 // A declaration of a parameter as "array of type" shall be 3331 // adjusted to "qualified pointer to type", where the type 3332 // qualifiers (if any) are those specified within the [ and ] of 3333 // the array type derivation. 3334 if (T->isArrayType()) 3335 Decayed = getArrayDecayedType(T); 3336 3337 // C99 6.7.5.3p8: 3338 // A declaration of a parameter as "function returning type" 3339 // shall be adjusted to "pointer to function returning type", as 3340 // in 6.3.2.1. 3341 if (T->isFunctionType()) 3342 Decayed = getPointerType(T); 3343 3344 return getDecayedType(T, Decayed); 3345 } 3346 3347 /// getBlockPointerType - Return the uniqued reference to the type for 3348 /// a pointer to the specified block. 3349 QualType ASTContext::getBlockPointerType(QualType T) const { 3350 assert(T->isFunctionType() && "block of function types only"); 3351 // Unique pointers, to guarantee there is only one block of a particular 3352 // structure. 3353 llvm::FoldingSetNodeID ID; 3354 BlockPointerType::Profile(ID, T); 3355 3356 void *InsertPos = nullptr; 3357 if (BlockPointerType *PT = 3358 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3359 return QualType(PT, 0); 3360 3361 // If the block pointee type isn't canonical, this won't be a canonical 3362 // type either so fill in the canonical type field. 3363 QualType Canonical; 3364 if (!T.isCanonical()) { 3365 Canonical = getBlockPointerType(getCanonicalType(T)); 3366 3367 // Get the new insert position for the node we care about. 3368 BlockPointerType *NewIP = 3369 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3370 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3371 } 3372 auto *New = 3373 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical); 3374 Types.push_back(New); 3375 BlockPointerTypes.InsertNode(New, InsertPos); 3376 return QualType(New, 0); 3377 } 3378 3379 /// getLValueReferenceType - Return the uniqued reference to the type for an 3380 /// lvalue reference to the specified type. 3381 QualType 3382 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3383 assert((!T->isPlaceholderType() || 3384 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3385 "Unresolved placeholder type"); 3386 3387 // Unique pointers, to guarantee there is only one pointer of a particular 3388 // structure. 3389 llvm::FoldingSetNodeID ID; 3390 ReferenceType::Profile(ID, T, SpelledAsLValue); 3391 3392 void *InsertPos = nullptr; 3393 if (LValueReferenceType *RT = 3394 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3395 return QualType(RT, 0); 3396 3397 const auto *InnerRef = T->getAs<ReferenceType>(); 3398 3399 // If the referencee type isn't canonical, this won't be a canonical type 3400 // either, so fill in the canonical type field. 3401 QualType Canonical; 3402 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3403 QualType PointeeType = (InnerRef ? 
InnerRef->getPointeeType() : T); 3404 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3405 3406 // Get the new insert position for the node we care about. 3407 LValueReferenceType *NewIP = 3408 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3409 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3410 } 3411 3412 auto *New = new (*this, alignof(LValueReferenceType)) 3413 LValueReferenceType(T, Canonical, SpelledAsLValue); 3414 Types.push_back(New); 3415 LValueReferenceTypes.InsertNode(New, InsertPos); 3416 3417 return QualType(New, 0); 3418 } 3419 3420 /// getRValueReferenceType - Return the uniqued reference to the type for an 3421 /// rvalue reference to the specified type. 3422 QualType ASTContext::getRValueReferenceType(QualType T) const { 3423 assert((!T->isPlaceholderType() || 3424 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3425 "Unresolved placeholder type"); 3426 3427 // Unique pointers, to guarantee there is only one pointer of a particular 3428 // structure. 3429 llvm::FoldingSetNodeID ID; 3430 ReferenceType::Profile(ID, T, false); 3431 3432 void *InsertPos = nullptr; 3433 if (RValueReferenceType *RT = 3434 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3435 return QualType(RT, 0); 3436 3437 const auto *InnerRef = T->getAs<ReferenceType>(); 3438 3439 // If the referencee type isn't canonical, this won't be a canonical type 3440 // either, so fill in the canonical type field. 3441 QualType Canonical; 3442 if (InnerRef || !T.isCanonical()) { 3443 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3444 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3445 3446 // Get the new insert position for the node we care about. 3447 RValueReferenceType *NewIP = 3448 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3449 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3450 } 3451 3452 auto *New = new (*this, alignof(RValueReferenceType)) 3453 RValueReferenceType(T, Canonical); 3454 Types.push_back(New); 3455 RValueReferenceTypes.InsertNode(New, InsertPos); 3456 return QualType(New, 0); 3457 } 3458 3459 /// getMemberPointerType - Return the uniqued reference to the type for a 3460 /// member pointer to the specified type, in the specified class. 3461 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3462 // Unique pointers, to guarantee there is only one pointer of a particular 3463 // structure. 3464 llvm::FoldingSetNodeID ID; 3465 MemberPointerType::Profile(ID, T, Cls); 3466 3467 void *InsertPos = nullptr; 3468 if (MemberPointerType *PT = 3469 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3470 return QualType(PT, 0); 3471 3472 // If the pointee or class type isn't canonical, this won't be a canonical 3473 // type either, so fill in the canonical type field. 3474 QualType Canonical; 3475 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3476 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3477 3478 // Get the new insert position for the node we care about. 
3479 MemberPointerType *NewIP = 3480 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3481 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3482 } 3483 auto *New = new (*this, alignof(MemberPointerType)) 3484 MemberPointerType(T, Cls, Canonical); 3485 Types.push_back(New); 3486 MemberPointerTypes.InsertNode(New, InsertPos); 3487 return QualType(New, 0); 3488 } 3489 3490 /// getConstantArrayType - Return the unique reference to the type for an 3491 /// array of the specified element type. 3492 QualType ASTContext::getConstantArrayType(QualType EltTy, 3493 const llvm::APInt &ArySizeIn, 3494 const Expr *SizeExpr, 3495 ArraySizeModifier ASM, 3496 unsigned IndexTypeQuals) const { 3497 assert((EltTy->isDependentType() || 3498 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3499 "Constant array of VLAs is illegal!"); 3500 3501 // We only need the size as part of the type if it's instantiation-dependent. 3502 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3503 SizeExpr = nullptr; 3504 3505 // Convert the array size into a canonical width matching the pointer size for 3506 // the target. 3507 llvm::APInt ArySize(ArySizeIn); 3508 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3509 3510 llvm::FoldingSetNodeID ID; 3511 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3512 IndexTypeQuals); 3513 3514 void *InsertPos = nullptr; 3515 if (ConstantArrayType *ATP = 3516 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3517 return QualType(ATP, 0); 3518 3519 // If the element type isn't canonical or has qualifiers, or the array bound 3520 // is instantiation-dependent, this won't be a canonical type either, so fill 3521 // in the canonical type field. 3522 QualType Canon; 3523 // FIXME: Check below should look for qualifiers behind sugar. 3524 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3525 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3526 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3527 ASM, IndexTypeQuals); 3528 Canon = getQualifiedType(Canon, canonSplit.Quals); 3529 3530 // Get the new insert position for the node we care about. 3531 ConstantArrayType *NewIP = 3532 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3533 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3534 } 3535 3536 void *Mem = Allocate( 3537 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3538 alignof(ConstantArrayType)); 3539 auto *New = new (Mem) 3540 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3541 ConstantArrayTypes.InsertNode(New, InsertPos); 3542 Types.push_back(New); 3543 return QualType(New, 0); 3544 } 3545 3546 /// getVariableArrayDecayedType - Turns the given type, which may be 3547 /// variably-modified, into the corresponding type with all the known 3548 /// sizes replaced with [*]. 3549 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3550 // Vastly most common case. 3551 if (!type->isVariablyModifiedType()) return type; 3552 3553 QualType result; 3554 3555 SplitQualType split = type.getSplitDesugaredType(); 3556 const Type *ty = split.Ty; 3557 switch (ty->getTypeClass()) { 3558 #define TYPE(Class, Base) 3559 #define ABSTRACT_TYPE(Class, Base) 3560 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3561 #include "clang/AST/TypeNodes.inc" 3562 llvm_unreachable("didn't desugar past all non-canonical types?"); 3563 3564 // These types should never be variably-modified. 
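  // (A type is "variably modified" when a variable-length array appears
  // somewhere within it.  For example, in a C function:
  //
  //   void f(int n) {
  //     int (*p)[n];   // pointer to VLA: variably modified
  //   }
  //
  // Decaying such a type replaces each known array bound with [*], so the
  // pointee of p becomes roughly 'int [*]'.  The cases below walk every
  // canonical type class and either reject, pass through, or rebuild.)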
3565 case Type::Builtin: 3566 case Type::Complex: 3567 case Type::Vector: 3568 case Type::DependentVector: 3569 case Type::ExtVector: 3570 case Type::DependentSizedExtVector: 3571 case Type::ConstantMatrix: 3572 case Type::DependentSizedMatrix: 3573 case Type::DependentAddressSpace: 3574 case Type::ObjCObject: 3575 case Type::ObjCInterface: 3576 case Type::ObjCObjectPointer: 3577 case Type::Record: 3578 case Type::Enum: 3579 case Type::UnresolvedUsing: 3580 case Type::TypeOfExpr: 3581 case Type::TypeOf: 3582 case Type::Decltype: 3583 case Type::UnaryTransform: 3584 case Type::DependentName: 3585 case Type::InjectedClassName: 3586 case Type::TemplateSpecialization: 3587 case Type::DependentTemplateSpecialization: 3588 case Type::TemplateTypeParm: 3589 case Type::SubstTemplateTypeParmPack: 3590 case Type::Auto: 3591 case Type::DeducedTemplateSpecialization: 3592 case Type::PackExpansion: 3593 case Type::BitInt: 3594 case Type::DependentBitInt: 3595 llvm_unreachable("type should never be variably-modified"); 3596 3597 // These types can be variably-modified but should never need to 3598 // further decay. 3599 case Type::FunctionNoProto: 3600 case Type::FunctionProto: 3601 case Type::BlockPointer: 3602 case Type::MemberPointer: 3603 case Type::Pipe: 3604 return type; 3605 3606 // These types can be variably-modified. All these modifications 3607 // preserve structure except as noted by comments. 3608 // TODO: if we ever care about optimizing VLAs, there are no-op 3609 // optimizations available here. 3610 case Type::Pointer: 3611 result = getPointerType(getVariableArrayDecayedType( 3612 cast<PointerType>(ty)->getPointeeType())); 3613 break; 3614 3615 case Type::LValueReference: { 3616 const auto *lv = cast<LValueReferenceType>(ty); 3617 result = getLValueReferenceType( 3618 getVariableArrayDecayedType(lv->getPointeeType()), 3619 lv->isSpelledAsLValue()); 3620 break; 3621 } 3622 3623 case Type::RValueReference: { 3624 const auto *lv = cast<RValueReferenceType>(ty); 3625 result = getRValueReferenceType( 3626 getVariableArrayDecayedType(lv->getPointeeType())); 3627 break; 3628 } 3629 3630 case Type::Atomic: { 3631 const auto *at = cast<AtomicType>(ty); 3632 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3633 break; 3634 } 3635 3636 case Type::ConstantArray: { 3637 const auto *cat = cast<ConstantArrayType>(ty); 3638 result = getConstantArrayType( 3639 getVariableArrayDecayedType(cat->getElementType()), 3640 cat->getSize(), 3641 cat->getSizeExpr(), 3642 cat->getSizeModifier(), 3643 cat->getIndexTypeCVRQualifiers()); 3644 break; 3645 } 3646 3647 case Type::DependentSizedArray: { 3648 const auto *dat = cast<DependentSizedArrayType>(ty); 3649 result = getDependentSizedArrayType( 3650 getVariableArrayDecayedType(dat->getElementType()), 3651 dat->getSizeExpr(), 3652 dat->getSizeModifier(), 3653 dat->getIndexTypeCVRQualifiers(), 3654 dat->getBracketsRange()); 3655 break; 3656 } 3657 3658 // Turn incomplete types into [*] types. 3659 case Type::IncompleteArray: { 3660 const auto *iat = cast<IncompleteArrayType>(ty); 3661 result = 3662 getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()), 3663 /*size*/ nullptr, ArraySizeModifier::Normal, 3664 iat->getIndexTypeCVRQualifiers(), SourceRange()); 3665 break; 3666 } 3667 3668 // Turn VLA types into [*] types. 
3669 case Type::VariableArray: { 3670 const auto *vat = cast<VariableArrayType>(ty); 3671 result = getVariableArrayType( 3672 getVariableArrayDecayedType(vat->getElementType()), 3673 /*size*/ nullptr, ArraySizeModifier::Star, 3674 vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange()); 3675 break; 3676 } 3677 } 3678 3679 // Apply the top-level qualifiers from the original. 3680 return getQualifiedType(result, split.Quals); 3681 } 3682 3683 /// getVariableArrayType - Returns a non-unique reference to the type for a 3684 /// variable array of the specified element type. 3685 QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts, 3686 ArraySizeModifier ASM, 3687 unsigned IndexTypeQuals, 3688 SourceRange Brackets) const { 3689 // Since we don't unique expressions, it isn't possible to unique VLA's 3690 // that have an expression provided for their size. 3691 QualType Canon; 3692 3693 // Be sure to pull qualifiers off the element type. 3694 // FIXME: Check below should look for qualifiers behind sugar. 3695 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3696 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3697 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3698 IndexTypeQuals, Brackets); 3699 Canon = getQualifiedType(Canon, canonSplit.Quals); 3700 } 3701 3702 auto *New = new (*this, alignof(VariableArrayType)) 3703 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3704 3705 VariableArrayTypes.push_back(New); 3706 Types.push_back(New); 3707 return QualType(New, 0); 3708 } 3709 3710 /// getDependentSizedArrayType - Returns a non-unique reference to 3711 /// the type for a dependently-sized array of the specified element 3712 /// type. 3713 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3714 Expr *numElements, 3715 ArraySizeModifier ASM, 3716 unsigned elementTypeQuals, 3717 SourceRange brackets) const { 3718 assert((!numElements || numElements->isTypeDependent() || 3719 numElements->isValueDependent()) && 3720 "Size must be type- or value-dependent!"); 3721 3722 // Dependently-sized array types that do not have a specified number 3723 // of elements will have their sizes deduced from a dependent 3724 // initializer. We do no canonicalization here at all, which is okay 3725 // because they can't be used in most locations. 3726 if (!numElements) { 3727 auto *newType = new (*this, alignof(DependentSizedArrayType)) 3728 DependentSizedArrayType(elementType, QualType(), numElements, ASM, 3729 elementTypeQuals, brackets); 3730 Types.push_back(newType); 3731 return QualType(newType, 0); 3732 } 3733 3734 // Otherwise, we actually build a new type every time, but we 3735 // also build a canonical type. 3736 3737 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3738 3739 void *insertPos = nullptr; 3740 llvm::FoldingSetNodeID ID; 3741 DependentSizedArrayType::Profile(ID, *this, 3742 QualType(canonElementType.Ty, 0), 3743 ASM, elementTypeQuals, numElements); 3744 3745 // Look for an existing type with these properties. 3746 DependentSizedArrayType *canonTy = 3747 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3748 3749 // If we don't have one, build one. 
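  // (Dependently-sized arrays are uniqued only at the canonical level.  The
  // canonical node is keyed on the canonical element type plus the size
  // expression; if the requested spelling differs, a separate non-uniqued
  // "sugared" node is built further below.  For instance, inside
  //
  //   template <int N> void f() {
  //     using Elt = int;
  //     Elt a[N];        // element type spelled 'Elt', canonical 'int[N]'
  //   }
  //
  // the array type of 'a' keeps 'Elt' as sugar over the canonical form.)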
3750 if (!canonTy) { 3751 canonTy = new (*this, alignof(DependentSizedArrayType)) 3752 DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(), 3753 numElements, ASM, elementTypeQuals, brackets); 3754 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3755 Types.push_back(canonTy); 3756 } 3757 3758 // Apply qualifiers from the element type to the array. 3759 QualType canon = getQualifiedType(QualType(canonTy,0), 3760 canonElementType.Quals); 3761 3762 // If we didn't need extra canonicalization for the element type or the size 3763 // expression, then just use that as our result. 3764 if (QualType(canonElementType.Ty, 0) == elementType && 3765 canonTy->getSizeExpr() == numElements) 3766 return canon; 3767 3768 // Otherwise, we need to build a type which follows the spelling 3769 // of the element type. 3770 auto *sugaredType = new (*this, alignof(DependentSizedArrayType)) 3771 DependentSizedArrayType(elementType, canon, numElements, ASM, 3772 elementTypeQuals, brackets); 3773 Types.push_back(sugaredType); 3774 return QualType(sugaredType, 0); 3775 } 3776 3777 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3778 ArraySizeModifier ASM, 3779 unsigned elementTypeQuals) const { 3780 llvm::FoldingSetNodeID ID; 3781 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3782 3783 void *insertPos = nullptr; 3784 if (IncompleteArrayType *iat = 3785 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3786 return QualType(iat, 0); 3787 3788 // If the element type isn't canonical, this won't be a canonical type 3789 // either, so fill in the canonical type field. We also have to pull 3790 // qualifiers off the element type. 3791 QualType canon; 3792 3793 // FIXME: Check below should look for qualifiers behind sugar. 3794 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3795 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3796 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3797 ASM, elementTypeQuals); 3798 canon = getQualifiedType(canon, canonSplit.Quals); 3799 3800 // Get the new insert position for the node we care about. 
3801 IncompleteArrayType *existing = 3802 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3803 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3804 } 3805 3806 auto *newType = new (*this, alignof(IncompleteArrayType)) 3807 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3808 3809 IncompleteArrayTypes.InsertNode(newType, insertPos); 3810 Types.push_back(newType); 3811 return QualType(newType, 0); 3812 } 3813 3814 ASTContext::BuiltinVectorTypeInfo 3815 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3816 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3817 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3818 NUMVECTORS}; 3819 3820 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3821 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3822 3823 switch (Ty->getKind()) { 3824 default: 3825 llvm_unreachable("Unsupported builtin vector type"); 3826 case BuiltinType::SveInt8: 3827 return SVE_INT_ELTTY(8, 16, true, 1); 3828 case BuiltinType::SveUint8: 3829 return SVE_INT_ELTTY(8, 16, false, 1); 3830 case BuiltinType::SveInt8x2: 3831 return SVE_INT_ELTTY(8, 16, true, 2); 3832 case BuiltinType::SveUint8x2: 3833 return SVE_INT_ELTTY(8, 16, false, 2); 3834 case BuiltinType::SveInt8x3: 3835 return SVE_INT_ELTTY(8, 16, true, 3); 3836 case BuiltinType::SveUint8x3: 3837 return SVE_INT_ELTTY(8, 16, false, 3); 3838 case BuiltinType::SveInt8x4: 3839 return SVE_INT_ELTTY(8, 16, true, 4); 3840 case BuiltinType::SveUint8x4: 3841 return SVE_INT_ELTTY(8, 16, false, 4); 3842 case BuiltinType::SveInt16: 3843 return SVE_INT_ELTTY(16, 8, true, 1); 3844 case BuiltinType::SveUint16: 3845 return SVE_INT_ELTTY(16, 8, false, 1); 3846 case BuiltinType::SveInt16x2: 3847 return SVE_INT_ELTTY(16, 8, true, 2); 3848 case BuiltinType::SveUint16x2: 3849 return SVE_INT_ELTTY(16, 8, false, 2); 3850 case BuiltinType::SveInt16x3: 3851 return SVE_INT_ELTTY(16, 8, true, 3); 3852 case BuiltinType::SveUint16x3: 3853 return SVE_INT_ELTTY(16, 8, false, 3); 3854 case BuiltinType::SveInt16x4: 3855 return SVE_INT_ELTTY(16, 8, true, 4); 3856 case BuiltinType::SveUint16x4: 3857 return SVE_INT_ELTTY(16, 8, false, 4); 3858 case BuiltinType::SveInt32: 3859 return SVE_INT_ELTTY(32, 4, true, 1); 3860 case BuiltinType::SveUint32: 3861 return SVE_INT_ELTTY(32, 4, false, 1); 3862 case BuiltinType::SveInt32x2: 3863 return SVE_INT_ELTTY(32, 4, true, 2); 3864 case BuiltinType::SveUint32x2: 3865 return SVE_INT_ELTTY(32, 4, false, 2); 3866 case BuiltinType::SveInt32x3: 3867 return SVE_INT_ELTTY(32, 4, true, 3); 3868 case BuiltinType::SveUint32x3: 3869 return SVE_INT_ELTTY(32, 4, false, 3); 3870 case BuiltinType::SveInt32x4: 3871 return SVE_INT_ELTTY(32, 4, true, 4); 3872 case BuiltinType::SveUint32x4: 3873 return SVE_INT_ELTTY(32, 4, false, 4); 3874 case BuiltinType::SveInt64: 3875 return SVE_INT_ELTTY(64, 2, true, 1); 3876 case BuiltinType::SveUint64: 3877 return SVE_INT_ELTTY(64, 2, false, 1); 3878 case BuiltinType::SveInt64x2: 3879 return SVE_INT_ELTTY(64, 2, true, 2); 3880 case BuiltinType::SveUint64x2: 3881 return SVE_INT_ELTTY(64, 2, false, 2); 3882 case BuiltinType::SveInt64x3: 3883 return SVE_INT_ELTTY(64, 2, true, 3); 3884 case BuiltinType::SveUint64x3: 3885 return SVE_INT_ELTTY(64, 2, false, 3); 3886 case BuiltinType::SveInt64x4: 3887 return SVE_INT_ELTTY(64, 2, true, 4); 3888 case BuiltinType::SveUint64x4: 3889 return SVE_INT_ELTTY(64, 2, false, 4); 3890 case BuiltinType::SveBool: 3891 return SVE_ELTTY(BoolTy, 16, 1); 3892 case 
BuiltinType::SveBoolx2: 3893 return SVE_ELTTY(BoolTy, 16, 2); 3894 case BuiltinType::SveBoolx4: 3895 return SVE_ELTTY(BoolTy, 16, 4); 3896 case BuiltinType::SveFloat16: 3897 return SVE_ELTTY(HalfTy, 8, 1); 3898 case BuiltinType::SveFloat16x2: 3899 return SVE_ELTTY(HalfTy, 8, 2); 3900 case BuiltinType::SveFloat16x3: 3901 return SVE_ELTTY(HalfTy, 8, 3); 3902 case BuiltinType::SveFloat16x4: 3903 return SVE_ELTTY(HalfTy, 8, 4); 3904 case BuiltinType::SveFloat32: 3905 return SVE_ELTTY(FloatTy, 4, 1); 3906 case BuiltinType::SveFloat32x2: 3907 return SVE_ELTTY(FloatTy, 4, 2); 3908 case BuiltinType::SveFloat32x3: 3909 return SVE_ELTTY(FloatTy, 4, 3); 3910 case BuiltinType::SveFloat32x4: 3911 return SVE_ELTTY(FloatTy, 4, 4); 3912 case BuiltinType::SveFloat64: 3913 return SVE_ELTTY(DoubleTy, 2, 1); 3914 case BuiltinType::SveFloat64x2: 3915 return SVE_ELTTY(DoubleTy, 2, 2); 3916 case BuiltinType::SveFloat64x3: 3917 return SVE_ELTTY(DoubleTy, 2, 3); 3918 case BuiltinType::SveFloat64x4: 3919 return SVE_ELTTY(DoubleTy, 2, 4); 3920 case BuiltinType::SveBFloat16: 3921 return SVE_ELTTY(BFloat16Ty, 8, 1); 3922 case BuiltinType::SveBFloat16x2: 3923 return SVE_ELTTY(BFloat16Ty, 8, 2); 3924 case BuiltinType::SveBFloat16x3: 3925 return SVE_ELTTY(BFloat16Ty, 8, 3); 3926 case BuiltinType::SveBFloat16x4: 3927 return SVE_ELTTY(BFloat16Ty, 8, 4); 3928 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3929 IsSigned) \ 3930 case BuiltinType::Id: \ 3931 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3932 llvm::ElementCount::getScalable(NumEls), NF}; 3933 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3934 case BuiltinType::Id: \ 3935 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3936 llvm::ElementCount::getScalable(NumEls), NF}; 3937 #define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3938 case BuiltinType::Id: \ 3939 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF}; 3940 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3941 case BuiltinType::Id: \ 3942 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3943 #include "clang/Basic/RISCVVTypes.def" 3944 } 3945 } 3946 3947 /// getExternrefType - Return a WebAssembly externref type, which represents an 3948 /// opaque reference to a host value. 3949 QualType ASTContext::getWebAssemblyExternrefType() const { 3950 if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) { 3951 #define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \ 3952 if (BuiltinType::Id == BuiltinType::WasmExternRef) \ 3953 return SingletonId; 3954 #include "clang/Basic/WebAssemblyReferenceTypes.def" 3955 } 3956 llvm_unreachable( 3957 "shouldn't try to generate type externref outside WebAssembly target"); 3958 } 3959 3960 /// getScalableVectorType - Return the unique reference to a scalable vector 3961 /// type of the specified element type and size. VectorType must be a built-in 3962 /// type. 
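/// For illustration, assuming an AArch64 target with SVE available (i.e.
/// Target->hasAArch64SVETypes() is true), the mapping is driven purely by the
/// element type, element bit-width and element count:
/// \code
///   // svint32_t: signed 32-bit elements, 4 per 128-bit granule.
///   QualType SveI32 = getScalableVectorType(IntTy, /*NumElts=*/4,
///                                           /*NumFields=*/1);
///   // svbool_t: predicate of 16 boolean elements.
///   QualType SveBool = getScalableVectorType(BoolTy, /*NumElts=*/16,
///                                            /*NumFields=*/1);
/// \endcode
/// If no builtin matches, a null QualType is returned.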
3963 QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, 3964 unsigned NumFields) const { 3965 if (Target->hasAArch64SVETypes()) { 3966 uint64_t EltTySize = getTypeSize(EltTy); 3967 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3968 IsSigned, IsFP, IsBF) \ 3969 if (!EltTy->isBooleanType() && \ 3970 ((EltTy->hasIntegerRepresentation() && \ 3971 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3972 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3973 IsFP && !IsBF) || \ 3974 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3975 IsBF && !IsFP)) && \ 3976 EltTySize == ElBits && NumElts == NumEls) { \ 3977 return SingletonId; \ 3978 } 3979 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3980 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3981 return SingletonId; 3982 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId) 3983 #include "clang/Basic/AArch64SVEACLETypes.def" 3984 } else if (Target->hasRISCVVTypes()) { 3985 uint64_t EltTySize = getTypeSize(EltTy); 3986 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3987 IsFP, IsBF) \ 3988 if (!EltTy->isBooleanType() && \ 3989 ((EltTy->hasIntegerRepresentation() && \ 3990 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3991 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3992 IsFP && !IsBF) || \ 3993 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3994 IsBF && !IsFP)) && \ 3995 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \ 3996 return SingletonId; 3997 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3998 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3999 return SingletonId; 4000 #include "clang/Basic/RISCVVTypes.def" 4001 } 4002 return QualType(); 4003 } 4004 4005 /// getVectorType - Return the unique reference to a vector type of 4006 /// the specified element type and size. VectorType must be a built-in type. 4007 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4008 VectorKind VecKind) const { 4009 assert(vecType->isBuiltinType() || 4010 (vecType->isBitIntType() && 4011 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4012 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4013 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4014 4015 // Check if we've already instantiated a vector of this type. 4016 llvm::FoldingSetNodeID ID; 4017 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4018 4019 void *InsertPos = nullptr; 4020 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4021 return QualType(VTP, 0); 4022 4023 // If the element type isn't canonical, this won't be a canonical type either, 4024 // so fill in the canonical type field. 4025 QualType Canonical; 4026 if (!vecType.isCanonical()) { 4027 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4028 4029 // Get the new insert position for the node we care about. 
4030 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4031 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4032 } 4033 auto *New = new (*this, alignof(VectorType)) 4034 VectorType(vecType, NumElts, Canonical, VecKind); 4035 VectorTypes.InsertNode(New, InsertPos); 4036 Types.push_back(New); 4037 return QualType(New, 0); 4038 } 4039 4040 QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4041 SourceLocation AttrLoc, 4042 VectorKind VecKind) const { 4043 llvm::FoldingSetNodeID ID; 4044 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4045 VecKind); 4046 void *InsertPos = nullptr; 4047 DependentVectorType *Canon = 4048 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4049 DependentVectorType *New; 4050 4051 if (Canon) { 4052 New = new (*this, alignof(DependentVectorType)) DependentVectorType( 4053 VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4054 } else { 4055 QualType CanonVecTy = getCanonicalType(VecType); 4056 if (CanonVecTy == VecType) { 4057 New = new (*this, alignof(DependentVectorType)) 4058 DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4059 4060 DependentVectorType *CanonCheck = 4061 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4062 assert(!CanonCheck && 4063 "Dependent-sized vector_size canonical type broken"); 4064 (void)CanonCheck; 4065 DependentVectorTypes.InsertNode(New, InsertPos); 4066 } else { 4067 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4068 SourceLocation(), VecKind); 4069 New = new (*this, alignof(DependentVectorType)) 4070 DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4071 } 4072 } 4073 4074 Types.push_back(New); 4075 return QualType(New, 0); 4076 } 4077 4078 /// getExtVectorType - Return the unique reference to an extended vector type of 4079 /// the specified element type and size. VectorType must be a built-in type. 4080 QualType ASTContext::getExtVectorType(QualType vecType, 4081 unsigned NumElts) const { 4082 assert(vecType->isBuiltinType() || vecType->isDependentType() || 4083 (vecType->isBitIntType() && 4084 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4085 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4086 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4087 4088 // Check if we've already instantiated a vector of this type. 4089 llvm::FoldingSetNodeID ID; 4090 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4091 VectorKind::Generic); 4092 void *InsertPos = nullptr; 4093 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4094 return QualType(VTP, 0); 4095 4096 // If the element type isn't canonical, this won't be a canonical type either, 4097 // so fill in the canonical type field. 4098 QualType Canonical; 4099 if (!vecType.isCanonical()) { 4100 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4101 4102 // Get the new insert position for the node we care about. 
4103 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4104 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4105 } 4106 auto *New = new (*this, alignof(ExtVectorType)) 4107 ExtVectorType(vecType, NumElts, Canonical); 4108 VectorTypes.InsertNode(New, InsertPos); 4109 Types.push_back(New); 4110 return QualType(New, 0); 4111 } 4112 4113 QualType 4114 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4115 Expr *SizeExpr, 4116 SourceLocation AttrLoc) const { 4117 llvm::FoldingSetNodeID ID; 4118 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4119 SizeExpr); 4120 4121 void *InsertPos = nullptr; 4122 DependentSizedExtVectorType *Canon 4123 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4124 DependentSizedExtVectorType *New; 4125 if (Canon) { 4126 // We already have a canonical version of this array type; use it as 4127 // the canonical type for a newly-built type. 4128 New = new (*this, alignof(DependentSizedExtVectorType)) 4129 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr, 4130 AttrLoc); 4131 } else { 4132 QualType CanonVecTy = getCanonicalType(vecType); 4133 if (CanonVecTy == vecType) { 4134 New = new (*this, alignof(DependentSizedExtVectorType)) 4135 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc); 4136 4137 DependentSizedExtVectorType *CanonCheck 4138 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4139 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4140 (void)CanonCheck; 4141 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4142 } else { 4143 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4144 SourceLocation()); 4145 New = new (*this, alignof(DependentSizedExtVectorType)) 4146 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc); 4147 } 4148 } 4149 4150 Types.push_back(New); 4151 return QualType(New, 0); 4152 } 4153 4154 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4155 unsigned NumColumns) const { 4156 llvm::FoldingSetNodeID ID; 4157 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4158 Type::ConstantMatrix); 4159 4160 assert(MatrixType::isValidElementType(ElementTy) && 4161 "need a valid element type"); 4162 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4163 ConstantMatrixType::isDimensionValid(NumColumns) && 4164 "need valid matrix dimensions"); 4165 void *InsertPos = nullptr; 4166 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4167 return QualType(MTP, 0); 4168 4169 QualType Canonical; 4170 if (!ElementTy.isCanonical()) { 4171 Canonical = 4172 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4173 4174 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4175 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4176 (void)NewIP; 4177 } 4178 4179 auto *New = new (*this, alignof(ConstantMatrixType)) 4180 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4181 MatrixTypes.InsertNode(New, InsertPos); 4182 Types.push_back(New); 4183 return QualType(New, 0); 4184 } 4185 4186 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4187 Expr *RowExpr, 4188 Expr *ColumnExpr, 4189 SourceLocation AttrLoc) const { 4190 QualType CanonElementTy = getCanonicalType(ElementTy); 4191 llvm::FoldingSetNodeID ID; 4192 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4193 ColumnExpr); 4194 
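  // (This path is reached for the matrix extension when either dimension is
  // dependent; for example, with -fenable-matrix, an alias template such as
  //
  //   template <typename T, unsigned R, unsigned C>
  //   using matrix_t = T __attribute__((matrix_type(R, C)));
  //
  // produces a DependentSizedMatrixType until R and C are known.  The alias
  // name matrix_t above is only an example spelling.)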
  void *InsertPos = nullptr;
  DependentSizedMatrixType *Canon =
      DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, alignof(DependentSizedMatrixType))
        DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
                                 ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // We already have a canonical version of the matrix type.
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Otherwise, use Canon as the canonical type for a newly-built sugared type.
  DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
      DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
      DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
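  // (In C17 and earlier, a declaration such as 'int f();' declares a function
  // without a prototype and gets a FunctionNoProtoType.  A direct construction
  // sketch, assuming an ASTContext &Ctx:
  //
  //   FunctionType::ExtInfo Info;    // default calling convention, no noreturn
  //   QualType KnR = Ctx.getFunctionNoProtoType(Ctx.IntTy, Info);  // 'int ()'
  // )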
4281 llvm::FoldingSetNodeID ID; 4282 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4283 4284 void *InsertPos = nullptr; 4285 if (FunctionNoProtoType *FT = 4286 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4287 return QualType(FT, 0); 4288 4289 QualType Canonical; 4290 if (!isCanonicalResultType(ResultTy)) { 4291 Canonical = 4292 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4293 4294 // Get the new insert position for the node we care about. 4295 FunctionNoProtoType *NewIP = 4296 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4297 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4298 } 4299 4300 auto *New = new (*this, alignof(FunctionNoProtoType)) 4301 FunctionNoProtoType(ResultTy, Canonical, Info); 4302 Types.push_back(New); 4303 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4304 return QualType(New, 0); 4305 } 4306 4307 CanQualType 4308 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4309 CanQualType CanResultType = getCanonicalType(ResultType); 4310 4311 // Canonical result types do not have ARC lifetime qualifiers. 4312 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4313 Qualifiers Qs = CanResultType.getQualifiers(); 4314 Qs.removeObjCLifetime(); 4315 return CanQualType::CreateUnsafe( 4316 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4317 } 4318 4319 return CanResultType; 4320 } 4321 4322 static bool isCanonicalExceptionSpecification( 4323 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4324 if (ESI.Type == EST_None) 4325 return true; 4326 if (!NoexceptInType) 4327 return false; 4328 4329 // C++17 onwards: exception specification is part of the type, as a simple 4330 // boolean "can this function type throw". 4331 if (ESI.Type == EST_BasicNoexcept) 4332 return true; 4333 4334 // A noexcept(expr) specification is (possibly) canonical if expr is 4335 // value-dependent. 4336 if (ESI.Type == EST_DependentNoexcept) 4337 return true; 4338 4339 // A dynamic exception specification is canonical if it only contains pack 4340 // expansions (so we can't tell whether it's non-throwing) and all its 4341 // contained types are canonical. 4342 if (ESI.Type == EST_Dynamic) { 4343 bool AnyPackExpansions = false; 4344 for (QualType ET : ESI.Exceptions) { 4345 if (!ET.isCanonical()) 4346 return false; 4347 if (ET->getAs<PackExpansionType>()) 4348 AnyPackExpansions = true; 4349 } 4350 return AnyPackExpansions; 4351 } 4352 4353 return false; 4354 } 4355 4356 QualType ASTContext::getFunctionTypeInternal( 4357 QualType ResultTy, ArrayRef<QualType> ArgArray, 4358 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4359 size_t NumArgs = ArgArray.size(); 4360 4361 // Unique functions, to guarantee there is only one function of a particular 4362 // structure. 4363 llvm::FoldingSetNodeID ID; 4364 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4365 *this, true); 4366 4367 QualType Canonical; 4368 bool Unique = false; 4369 4370 void *InsertPos = nullptr; 4371 if (FunctionProtoType *FPT = 4372 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4373 QualType Existing = QualType(FPT, 0); 4374 4375 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4376 // it so long as our exception specification doesn't contain a dependent 4377 // noexcept expression, or we're just looking for a canonical type. 4378 // Otherwise, we're going to need to create a type 4379 // sugar node to hold the concrete expression. 
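  // (A "computed" noexcept specification is one spelled noexcept(expr).  Two
  // otherwise identical declarations can differ only in how that expression
  // was written, e.g.
  //
  //   void f() noexcept(sizeof(int) == 4);
  //   void g() noexcept(true);
  //
  // Both have the canonical type 'void () noexcept' in C++17, but each keeps
  // its own sugar node recording the noexcept expression as written.)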
4380 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4381 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4382 return Existing; 4383 4384 // We need a new type sugar node for this one, to hold the new noexcept 4385 // expression. We do no canonicalization here, but that's OK since we don't 4386 // expect to see the same noexcept expression much more than once. 4387 Canonical = getCanonicalType(Existing); 4388 Unique = true; 4389 } 4390 4391 bool NoexceptInType = getLangOpts().CPlusPlus17; 4392 bool IsCanonicalExceptionSpec = 4393 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4394 4395 // Determine whether the type being created is already canonical or not. 4396 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4397 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4398 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4399 if (!ArgArray[i].isCanonicalAsParam()) 4400 isCanonical = false; 4401 4402 if (OnlyWantCanonical) 4403 assert(isCanonical && 4404 "given non-canonical parameters constructing canonical type"); 4405 4406 // If this type isn't canonical, get the canonical version of it if we don't 4407 // already have it. The exception spec is only partially part of the 4408 // canonical type, and only in C++17 onwards. 4409 if (!isCanonical && Canonical.isNull()) { 4410 SmallVector<QualType, 16> CanonicalArgs; 4411 CanonicalArgs.reserve(NumArgs); 4412 for (unsigned i = 0; i != NumArgs; ++i) 4413 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4414 4415 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4416 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4417 CanonicalEPI.HasTrailingReturn = false; 4418 4419 if (IsCanonicalExceptionSpec) { 4420 // Exception spec is already OK. 4421 } else if (NoexceptInType) { 4422 switch (EPI.ExceptionSpec.Type) { 4423 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4424 // We don't know yet. It shouldn't matter what we pick here; no-one 4425 // should ever look at this. 4426 [[fallthrough]]; 4427 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4428 CanonicalEPI.ExceptionSpec.Type = EST_None; 4429 break; 4430 4431 // A dynamic exception specification is almost always "not noexcept", 4432 // with the exception that a pack expansion might expand to no types. 4433 case EST_Dynamic: { 4434 bool AnyPacks = false; 4435 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4436 if (ET->getAs<PackExpansionType>()) 4437 AnyPacks = true; 4438 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4439 } 4440 if (!AnyPacks) 4441 CanonicalEPI.ExceptionSpec.Type = EST_None; 4442 else { 4443 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4444 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4445 } 4446 break; 4447 } 4448 4449 case EST_DynamicNone: 4450 case EST_BasicNoexcept: 4451 case EST_NoexceptTrue: 4452 case EST_NoThrow: 4453 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4454 break; 4455 4456 case EST_DependentNoexcept: 4457 llvm_unreachable("dependent noexcept is already canonical"); 4458 } 4459 } else { 4460 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4461 } 4462 4463 // Adjust the canonical function result type. 4464 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4465 Canonical = 4466 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4467 4468 // Get the new insert position for the node we care about. 
4469 FunctionProtoType *NewIP = 4470 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4471 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4472 } 4473 4474 // Compute the needed size to hold this FunctionProtoType and the 4475 // various trailing objects. 4476 auto ESH = FunctionProtoType::getExceptionSpecSize( 4477 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4478 size_t Size = FunctionProtoType::totalSizeToAlloc< 4479 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4480 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4481 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4482 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4483 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4484 EPI.ExtParameterInfos ? NumArgs : 0, 4485 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4486 4487 auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType)); 4488 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4489 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4490 Types.push_back(FTP); 4491 if (!Unique) 4492 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4493 return QualType(FTP, 0); 4494 } 4495 4496 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4497 llvm::FoldingSetNodeID ID; 4498 PipeType::Profile(ID, T, ReadOnly); 4499 4500 void *InsertPos = nullptr; 4501 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4502 return QualType(PT, 0); 4503 4504 // If the pipe element type isn't canonical, this won't be a canonical type 4505 // either, so fill in the canonical type field. 4506 QualType Canonical; 4507 if (!T.isCanonical()) { 4508 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4509 4510 // Get the new insert position for the node we care about. 4511 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4512 assert(!NewIP && "Shouldn't be in the map!"); 4513 (void)NewIP; 4514 } 4515 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly); 4516 Types.push_back(New); 4517 PipeTypes.InsertNode(New, InsertPos); 4518 return QualType(New, 0); 4519 } 4520 4521 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4522 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4523 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4524 : Ty; 4525 } 4526 4527 QualType ASTContext::getReadPipeType(QualType T) const { 4528 return getPipeType(T, true); 4529 } 4530 4531 QualType ASTContext::getWritePipeType(QualType T) const { 4532 return getPipeType(T, false); 4533 } 4534 4535 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4536 llvm::FoldingSetNodeID ID; 4537 BitIntType::Profile(ID, IsUnsigned, NumBits); 4538 4539 void *InsertPos = nullptr; 4540 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4541 return QualType(EIT, 0); 4542 4543 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits); 4544 BitIntTypes.InsertNode(New, InsertPos); 4545 Types.push_back(New); 4546 return QualType(New, 0); 4547 } 4548 4549 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4550 Expr *NumBitsExpr) const { 4551 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4552 llvm::FoldingSetNodeID ID; 4553 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4554 4555 void *InsertPos = nullptr; 4556 if (DependentBitIntType *Existing = 4557 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4558 return QualType(Existing, 0); 4559 4560 auto *New = new (*this, alignof(DependentBitIntType)) 4561 DependentBitIntType(IsUnsigned, NumBitsExpr); 4562 DependentBitIntTypes.InsertNode(New, InsertPos); 4563 4564 Types.push_back(New); 4565 return QualType(New, 0); 4566 } 4567 4568 #ifndef NDEBUG 4569 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4570 if (!isa<CXXRecordDecl>(D)) return false; 4571 const auto *RD = cast<CXXRecordDecl>(D); 4572 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4573 return true; 4574 if (RD->getDescribedClassTemplate() && 4575 !isa<ClassTemplateSpecializationDecl>(RD)) 4576 return true; 4577 return false; 4578 } 4579 #endif 4580 4581 /// getInjectedClassNameType - Return the unique reference to the 4582 /// injected class name type for the specified templated declaration. 4583 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4584 QualType TST) const { 4585 assert(NeedsInjectedClassNameType(Decl)); 4586 if (Decl->TypeForDecl) { 4587 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4588 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4589 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4590 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4591 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4592 } else { 4593 Type *newType = new (*this, alignof(InjectedClassNameType)) 4594 InjectedClassNameType(Decl, TST); 4595 Decl->TypeForDecl = newType; 4596 Types.push_back(newType); 4597 } 4598 return QualType(Decl->TypeForDecl, 0); 4599 } 4600 4601 /// getTypeDeclType - Return the unique reference to the type for the 4602 /// specified type declaration. 
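/// The inline getTypeDeclType wrapper in ASTContext.h is the fast path: it
/// hands back the cached TypeForDecl pointer when it is already populated and
/// essentially only reaches this slow path the first time a declaration's
/// type is requested.  Typical use, assuming an ASTContext &Ctx and a
/// TypeDecl *TD obtained from a lookup:
/// \code
///   QualType T = Ctx.getTypeDeclType(TD);   // typedef, record, enum, ...
/// \endcode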
4603 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4604 assert(Decl && "Passed null for Decl param"); 4605 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4606 4607 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4608 return getTypedefType(Typedef); 4609 4610 assert(!isa<TemplateTypeParmDecl>(Decl) && 4611 "Template type parameter types are always available."); 4612 4613 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4614 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4615 assert(!NeedsInjectedClassNameType(Record)); 4616 return getRecordType(Record); 4617 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4618 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4619 return getEnumType(Enum); 4620 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4621 return getUnresolvedUsingType(Using); 4622 } else 4623 llvm_unreachable("TypeDecl without a type?"); 4624 4625 return QualType(Decl->TypeForDecl, 0); 4626 } 4627 4628 /// getTypedefType - Return the unique reference to the type for the 4629 /// specified typedef name decl. 4630 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4631 QualType Underlying) const { 4632 if (!Decl->TypeForDecl) { 4633 if (Underlying.isNull()) 4634 Underlying = Decl->getUnderlyingType(); 4635 auto *NewType = new (*this, alignof(TypedefType)) TypedefType( 4636 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying)); 4637 Decl->TypeForDecl = NewType; 4638 Types.push_back(NewType); 4639 return QualType(NewType, 0); 4640 } 4641 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying) 4642 return QualType(Decl->TypeForDecl, 0); 4643 assert(hasSameType(Decl->getUnderlyingType(), Underlying)); 4644 4645 llvm::FoldingSetNodeID ID; 4646 TypedefType::Profile(ID, Decl, Underlying); 4647 4648 void *InsertPos = nullptr; 4649 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4650 assert(!T->typeMatchesDecl() && 4651 "non-divergent case should be handled with TypeDecl"); 4652 return QualType(T, 0); 4653 } 4654 4655 void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true), 4656 alignof(TypedefType)); 4657 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying, 4658 getCanonicalType(Underlying)); 4659 TypedefTypes.InsertNode(NewType, InsertPos); 4660 Types.push_back(NewType); 4661 return QualType(NewType, 0); 4662 } 4663 4664 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4665 QualType Underlying) const { 4666 llvm::FoldingSetNodeID ID; 4667 UsingType::Profile(ID, Found, Underlying); 4668 4669 void *InsertPos = nullptr; 4670 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos)) 4671 return QualType(T, 0); 4672 4673 const Type *TypeForDecl = 4674 cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(); 4675 4676 assert(!Underlying.hasLocalQualifiers()); 4677 QualType Canon = Underlying->getCanonicalTypeInternal(); 4678 assert(TypeForDecl->getCanonicalTypeInternal() == Canon); 4679 4680 if (Underlying.getTypePtr() == TypeForDecl) 4681 Underlying = QualType(); 4682 void *Mem = 4683 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()), 4684 alignof(UsingType)); 4685 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon); 4686 Types.push_back(NewType); 4687 UsingTypes.InsertNode(NewType, InsertPos); 4688 return QualType(NewType, 0); 4689 } 4690 4691 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 
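  // Fast path: reuse the cached node for this declaration, or share the node
  // already created for a previous declaration of the same struct/union;
  // otherwise allocate a fresh RecordType below.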
4692 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4693 4694 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4695 if (PrevDecl->TypeForDecl) 4696 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4697 4698 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl); 4699 Decl->TypeForDecl = newType; 4700 Types.push_back(newType); 4701 return QualType(newType, 0); 4702 } 4703 4704 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4705 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4706 4707 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4708 if (PrevDecl->TypeForDecl) 4709 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4710 4711 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl); 4712 Decl->TypeForDecl = newType; 4713 Types.push_back(newType); 4714 return QualType(newType, 0); 4715 } 4716 4717 QualType ASTContext::getUnresolvedUsingType( 4718 const UnresolvedUsingTypenameDecl *Decl) const { 4719 if (Decl->TypeForDecl) 4720 return QualType(Decl->TypeForDecl, 0); 4721 4722 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4723 Decl->getCanonicalDecl()) 4724 if (CanonicalDecl->TypeForDecl) 4725 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4726 4727 Type *newType = 4728 new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl); 4729 Decl->TypeForDecl = newType; 4730 Types.push_back(newType); 4731 return QualType(newType, 0); 4732 } 4733 4734 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4735 QualType modifiedType, 4736 QualType equivalentType) const { 4737 llvm::FoldingSetNodeID id; 4738 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4739 4740 void *insertPos = nullptr; 4741 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4742 if (type) return QualType(type, 0); 4743 4744 QualType canon = getCanonicalType(equivalentType); 4745 type = new (*this, alignof(AttributedType)) 4746 AttributedType(canon, attrKind, modifiedType, equivalentType); 4747 4748 Types.push_back(type); 4749 AttributedTypes.InsertNode(type, insertPos); 4750 4751 return QualType(type, 0); 4752 } 4753 4754 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4755 QualType Wrapped) { 4756 llvm::FoldingSetNodeID ID; 4757 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4758 4759 void *InsertPos = nullptr; 4760 BTFTagAttributedType *Ty = 4761 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4762 if (Ty) 4763 return QualType(Ty, 0); 4764 4765 QualType Canon = getCanonicalType(Wrapped); 4766 Ty = new (*this, alignof(BTFTagAttributedType)) 4767 BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4768 4769 Types.push_back(Ty); 4770 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4771 4772 return QualType(Ty, 0); 4773 } 4774 4775 /// Retrieve a substitution-result type. 
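// For orientation, a worked example (ours, not from this file): instantiating
// the snippet below replaces 'T' with 'int', and the instantiated parameter's
// type is recorded as a SubstTemplateTypeParmType node -- sugar that remembers
// the replaced parameter and whose canonical type is plain 'int'.
#if 0
template <typename T> T identity(T v) { return v; }
int usesSubstitution = identity(42); // instantiates identity<int>
#endif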
4776 QualType ASTContext::getSubstTemplateTypeParmType( 4777 QualType Replacement, Decl *AssociatedDecl, unsigned Index, 4778 std::optional<unsigned> PackIndex) const { 4779 llvm::FoldingSetNodeID ID; 4780 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, 4781 PackIndex); 4782 void *InsertPos = nullptr; 4783 SubstTemplateTypeParmType *SubstParm = 4784 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4785 4786 if (!SubstParm) { 4787 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( 4788 !Replacement.isCanonical()), 4789 alignof(SubstTemplateTypeParmType)); 4790 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, 4791 Index, PackIndex); 4792 Types.push_back(SubstParm); 4793 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 4794 } 4795 4796 return QualType(SubstParm, 0); 4797 } 4798 4799 /// Retrieve a 4800 QualType 4801 ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, 4802 unsigned Index, bool Final, 4803 const TemplateArgument &ArgPack) { 4804 #ifndef NDEBUG 4805 for (const auto &P : ArgPack.pack_elements()) 4806 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); 4807 #endif 4808 4809 llvm::FoldingSetNodeID ID; 4810 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, 4811 ArgPack); 4812 void *InsertPos = nullptr; 4813 if (SubstTemplateTypeParmPackType *SubstParm = 4814 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 4815 return QualType(SubstParm, 0); 4816 4817 QualType Canon; 4818 { 4819 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); 4820 if (!AssociatedDecl->isCanonicalDecl() || 4821 !CanonArgPack.structurallyEquals(ArgPack)) { 4822 Canon = getSubstTemplateTypeParmPackType( 4823 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); 4824 [[maybe_unused]] const auto *Nothing = 4825 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 4826 assert(!Nothing); 4827 } 4828 } 4829 4830 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType)) 4831 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final, 4832 ArgPack); 4833 Types.push_back(SubstParm); 4834 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 4835 return QualType(SubstParm, 0); 4836 } 4837 4838 /// Retrieve the template type parameter type for a template 4839 /// parameter or parameter pack with the given depth, index, and (optionally) 4840 /// name. 
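// Depth/index orientation example (ours, not from this file) for the
// uniquing below:
#if 0
template <typename T>            // T -> depth 0, index 0
struct Outer {
  template <typename U, int N>   // U -> depth 1, index 0 (N is index 1)
  void f(T, U);
};
#endif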
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(TTPDecl, Canon);

    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, alignof(TemplateTypeParmType))
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}

TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                              const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST =
      getTemplateSpecializationType(Name, Args.arguments(), Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgumentLoc> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args)
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}

#ifndef NDEBUG
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  // No pack expansion was found among the arguments.
  return false;
}
#endif

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = QTN->getUnderlyingTemplate();

  const auto *TD = Template.getAsTemplateDecl();
  bool IsTypeAlias = TD && TD->isTypeAlias();
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
4937 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4938 "Caller must compute aliased type"); 4939 IsTypeAlias = false; 4940 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4941 } 4942 4943 // Allocate the (non-canonical) template specialization type, but don't 4944 // try to unique it: these types typically have location information that 4945 // we don't unique and don't want to lose. 4946 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4947 sizeof(TemplateArgument) * Args.size() + 4948 (IsTypeAlias ? sizeof(QualType) : 0), 4949 alignof(TemplateSpecializationType)); 4950 auto *Spec 4951 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4952 IsTypeAlias ? Underlying : QualType()); 4953 4954 Types.push_back(Spec); 4955 return QualType(Spec, 0); 4956 } 4957 4958 QualType ASTContext::getCanonicalTemplateSpecializationType( 4959 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4960 assert(!Template.getAsDependentTemplateName() && 4961 "No dependent template names here!"); 4962 4963 // Look through qualified template names. 4964 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4965 Template = TemplateName(QTN->getUnderlyingTemplate()); 4966 4967 // Build the canonical template specialization type. 4968 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4969 bool AnyNonCanonArgs = false; 4970 auto CanonArgs = 4971 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 4972 4973 // Determine whether this canonical template specialization type already 4974 // exists. 4975 llvm::FoldingSetNodeID ID; 4976 TemplateSpecializationType::Profile(ID, CanonTemplate, 4977 CanonArgs, *this); 4978 4979 void *InsertPos = nullptr; 4980 TemplateSpecializationType *Spec 4981 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4982 4983 if (!Spec) { 4984 // Allocate a new canonical template specialization type. 
4985 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4986 sizeof(TemplateArgument) * CanonArgs.size()), 4987 alignof(TemplateSpecializationType)); 4988 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 4989 CanonArgs, 4990 QualType(), QualType()); 4991 Types.push_back(Spec); 4992 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 4993 } 4994 4995 assert(Spec->isDependentType() && 4996 "Non-dependent template-id type must have a canonical type"); 4997 return QualType(Spec, 0); 4998 } 4999 5000 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5001 NestedNameSpecifier *NNS, 5002 QualType NamedType, 5003 TagDecl *OwnedTagDecl) const { 5004 llvm::FoldingSetNodeID ID; 5005 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5006 5007 void *InsertPos = nullptr; 5008 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5009 if (T) 5010 return QualType(T, 0); 5011 5012 QualType Canon = NamedType; 5013 if (!Canon.isCanonical()) { 5014 Canon = getCanonicalType(NamedType); 5015 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5016 assert(!CheckT && "Elaborated canonical type broken"); 5017 (void)CheckT; 5018 } 5019 5020 void *Mem = 5021 Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5022 alignof(ElaboratedType)); 5023 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5024 5025 Types.push_back(T); 5026 ElaboratedTypes.InsertNode(T, InsertPos); 5027 return QualType(T, 0); 5028 } 5029 5030 QualType 5031 ASTContext::getParenType(QualType InnerType) const { 5032 llvm::FoldingSetNodeID ID; 5033 ParenType::Profile(ID, InnerType); 5034 5035 void *InsertPos = nullptr; 5036 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5037 if (T) 5038 return QualType(T, 0); 5039 5040 QualType Canon = InnerType; 5041 if (!Canon.isCanonical()) { 5042 Canon = getCanonicalType(InnerType); 5043 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5044 assert(!CheckT && "Paren canonical type broken"); 5045 (void)CheckT; 5046 } 5047 5048 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon); 5049 Types.push_back(T); 5050 ParenTypes.InsertNode(T, InsertPos); 5051 return QualType(T, 0); 5052 } 5053 5054 QualType 5055 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5056 const IdentifierInfo *MacroII) const { 5057 QualType Canon = UnderlyingTy; 5058 if (!Canon.isCanonical()) 5059 Canon = getCanonicalType(UnderlyingTy); 5060 5061 auto *newType = new (*this, alignof(MacroQualifiedType)) 5062 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5063 Types.push_back(newType); 5064 return QualType(newType, 0); 5065 } 5066 5067 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5068 NestedNameSpecifier *NNS, 5069 const IdentifierInfo *Name, 5070 QualType Canon) const { 5071 if (Canon.isNull()) { 5072 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5073 if (CanonNNS != NNS) 5074 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5075 } 5076 5077 llvm::FoldingSetNodeID ID; 5078 DependentNameType::Profile(ID, Keyword, NNS, Name); 5079 5080 void *InsertPos = nullptr; 5081 DependentNameType *T 5082 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5083 if (T) 5084 return QualType(T, 0); 5085 5086 T = new (*this, alignof(DependentNameType)) 5087 DependentNameType(Keyword, NNS, Name, Canon); 5088 Types.push_back(T); 5089 DependentNameTypes.InsertNode(T, InsertPos); 5090 return QualType(T, 0); 
5091 } 5092 5093 QualType ASTContext::getDependentTemplateSpecializationType( 5094 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5095 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5096 // TODO: avoid this copy 5097 SmallVector<TemplateArgument, 16> ArgCopy; 5098 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5099 ArgCopy.push_back(Args[I].getArgument()); 5100 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5101 } 5102 5103 QualType 5104 ASTContext::getDependentTemplateSpecializationType( 5105 ElaboratedTypeKeyword Keyword, 5106 NestedNameSpecifier *NNS, 5107 const IdentifierInfo *Name, 5108 ArrayRef<TemplateArgument> Args) const { 5109 assert((!NNS || NNS->isDependent()) && 5110 "nested-name-specifier must be dependent"); 5111 5112 llvm::FoldingSetNodeID ID; 5113 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5114 Name, Args); 5115 5116 void *InsertPos = nullptr; 5117 DependentTemplateSpecializationType *T 5118 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5119 if (T) 5120 return QualType(T, 0); 5121 5122 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5123 5124 ElaboratedTypeKeyword CanonKeyword = Keyword; 5125 if (Keyword == ElaboratedTypeKeyword::None) 5126 CanonKeyword = ElaboratedTypeKeyword::Typename; 5127 5128 bool AnyNonCanonArgs = false; 5129 auto CanonArgs = 5130 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5131 5132 QualType Canon; 5133 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5134 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5135 Name, 5136 CanonArgs); 5137 5138 // Find the insert position again. 5139 [[maybe_unused]] auto *Nothing = 5140 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5141 assert(!Nothing && "canonical type broken"); 5142 } 5143 5144 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5145 sizeof(TemplateArgument) * Args.size()), 5146 alignof(DependentTemplateSpecializationType)); 5147 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5148 Name, Args, Canon); 5149 Types.push_back(T); 5150 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5151 return QualType(T, 0); 5152 } 5153 5154 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5155 TemplateArgument Arg; 5156 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5157 QualType ArgType = getTypeDeclType(TTP); 5158 if (TTP->isParameterPack()) 5159 ArgType = getPackExpansionType(ArgType, std::nullopt); 5160 5161 Arg = TemplateArgument(ArgType); 5162 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5163 QualType T = 5164 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5165 // For class NTTPs, ensure we include the 'const' so the type matches that 5166 // of a real template argument. 5167 // FIXME: It would be more faithful to model this as something like an 5168 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
5169 if (T->isRecordType()) 5170 T.addConst(); 5171 Expr *E = new (*this) DeclRefExpr( 5172 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5173 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5174 5175 if (NTTP->isParameterPack()) 5176 E = new (*this) 5177 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5178 Arg = TemplateArgument(E); 5179 } else { 5180 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5181 if (TTP->isParameterPack()) 5182 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); 5183 else 5184 Arg = TemplateArgument(TemplateName(TTP)); 5185 } 5186 5187 if (Param->isTemplateParameterPack()) 5188 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5189 5190 return Arg; 5191 } 5192 5193 void 5194 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5195 SmallVectorImpl<TemplateArgument> &Args) { 5196 Args.reserve(Args.size() + Params->size()); 5197 5198 for (NamedDecl *Param : *Params) 5199 Args.push_back(getInjectedTemplateArg(Param)); 5200 } 5201 5202 QualType ASTContext::getPackExpansionType(QualType Pattern, 5203 std::optional<unsigned> NumExpansions, 5204 bool ExpectPackInType) { 5205 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5206 "Pack expansions must expand one or more parameter packs"); 5207 5208 llvm::FoldingSetNodeID ID; 5209 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5210 5211 void *InsertPos = nullptr; 5212 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5213 if (T) 5214 return QualType(T, 0); 5215 5216 QualType Canon; 5217 if (!Pattern.isCanonical()) { 5218 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5219 /*ExpectPackInType=*/false); 5220 5221 // Find the insert position again, in case we inserted an element into 5222 // PackExpansionTypes and invalidated our insert position. 5223 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5224 } 5225 5226 T = new (*this, alignof(PackExpansionType)) 5227 PackExpansionType(Pattern, Canon, NumExpansions); 5228 Types.push_back(T); 5229 PackExpansionTypes.InsertNode(T, InsertPos); 5230 return QualType(T, 0); 5231 } 5232 5233 /// CmpProtocolNames - Comparison predicate for sorting protocols 5234 /// alphabetically. 5235 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5236 ObjCProtocolDecl *const *RHS) { 5237 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5238 } 5239 5240 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5241 if (Protocols.empty()) return true; 5242 5243 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5244 return false; 5245 5246 for (unsigned i = 1; i != Protocols.size(); ++i) 5247 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5248 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5249 return false; 5250 return true; 5251 } 5252 5253 static void 5254 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5255 // Sort protocols, keyed by name. 5256 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5257 5258 // Canonicalize. 5259 for (ObjCProtocolDecl *&P : Protocols) 5260 P = P->getCanonicalDecl(); 5261 5262 // Remove duplicates. 
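  // (std::unique only collapses *adjacent* duplicates, which is why the list
  // was sorted by name and canonicalized first: e.g. {B, A, B} sorts to
  // {A, B, B} and then uniques to {A, B}.)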
5263 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5264 Protocols.erase(ProtocolsEnd, Protocols.end()); 5265 } 5266 5267 QualType ASTContext::getObjCObjectType(QualType BaseType, 5268 ObjCProtocolDecl * const *Protocols, 5269 unsigned NumProtocols) const { 5270 return getObjCObjectType(BaseType, {}, 5271 llvm::ArrayRef(Protocols, NumProtocols), 5272 /*isKindOf=*/false); 5273 } 5274 5275 QualType ASTContext::getObjCObjectType( 5276 QualType baseType, 5277 ArrayRef<QualType> typeArgs, 5278 ArrayRef<ObjCProtocolDecl *> protocols, 5279 bool isKindOf) const { 5280 // If the base type is an interface and there aren't any protocols or 5281 // type arguments to add, then the interface type will do just fine. 5282 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5283 isa<ObjCInterfaceType>(baseType)) 5284 return baseType; 5285 5286 // Look in the folding set for an existing type. 5287 llvm::FoldingSetNodeID ID; 5288 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5289 void *InsertPos = nullptr; 5290 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5291 return QualType(QT, 0); 5292 5293 // Determine the type arguments to be used for canonicalization, 5294 // which may be explicitly specified here or written on the base 5295 // type. 5296 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5297 if (effectiveTypeArgs.empty()) { 5298 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5299 effectiveTypeArgs = baseObject->getTypeArgs(); 5300 } 5301 5302 // Build the canonical type, which has the canonical base type and a 5303 // sorted-and-uniqued list of protocols and the type arguments 5304 // canonicalized. 5305 QualType canonical; 5306 bool typeArgsAreCanonical = llvm::all_of( 5307 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5308 bool protocolsSorted = areSortedAndUniqued(protocols); 5309 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5310 // Determine the canonical type arguments. 5311 ArrayRef<QualType> canonTypeArgs; 5312 SmallVector<QualType, 4> canonTypeArgsVec; 5313 if (!typeArgsAreCanonical) { 5314 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5315 for (auto typeArg : effectiveTypeArgs) 5316 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5317 canonTypeArgs = canonTypeArgsVec; 5318 } else { 5319 canonTypeArgs = effectiveTypeArgs; 5320 } 5321 5322 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5323 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5324 if (!protocolsSorted) { 5325 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5326 SortAndUniqueProtocols(canonProtocolsVec); 5327 canonProtocols = canonProtocolsVec; 5328 } else { 5329 canonProtocols = protocols; 5330 } 5331 5332 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5333 canonProtocols, isKindOf); 5334 5335 // Regenerate InsertPos. 5336 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5337 } 5338 5339 unsigned size = sizeof(ObjCObjectTypeImpl); 5340 size += typeArgs.size() * sizeof(QualType); 5341 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5342 void *mem = Allocate(size, alignof(ObjCObjectTypeImpl)); 5343 auto *T = 5344 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5345 isKindOf); 5346 5347 Types.push_back(T); 5348 ObjCObjectTypes.InsertNode(T, InsertPos); 5349 return QualType(T, 0); 5350 } 5351 5352 /// Apply Objective-C protocol qualifiers to the given type. 
5353 /// If this is for the canonical type of a type parameter, we can apply 5354 /// protocol qualifiers on the ObjCObjectPointerType. 5355 QualType 5356 ASTContext::applyObjCProtocolQualifiers(QualType type, 5357 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5358 bool allowOnPointerType) const { 5359 hasError = false; 5360 5361 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5362 return getObjCTypeParamType(objT->getDecl(), protocols); 5363 } 5364 5365 // Apply protocol qualifiers to ObjCObjectPointerType. 5366 if (allowOnPointerType) { 5367 if (const auto *objPtr = 5368 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5369 const ObjCObjectType *objT = objPtr->getObjectType(); 5370 // Merge protocol lists and construct ObjCObjectType. 5371 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5372 protocolsVec.append(objT->qual_begin(), 5373 objT->qual_end()); 5374 protocolsVec.append(protocols.begin(), protocols.end()); 5375 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5376 type = getObjCObjectType( 5377 objT->getBaseType(), 5378 objT->getTypeArgsAsWritten(), 5379 protocols, 5380 objT->isKindOfTypeAsWritten()); 5381 return getObjCObjectPointerType(type); 5382 } 5383 } 5384 5385 // Apply protocol qualifiers to ObjCObjectType. 5386 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5387 // FIXME: Check for protocols to which the class type is already 5388 // known to conform. 5389 5390 return getObjCObjectType(objT->getBaseType(), 5391 objT->getTypeArgsAsWritten(), 5392 protocols, 5393 objT->isKindOfTypeAsWritten()); 5394 } 5395 5396 // If the canonical type is ObjCObjectType, ... 5397 if (type->isObjCObjectType()) { 5398 // Silently overwrite any existing protocol qualifiers. 5399 // TODO: determine whether that's the right thing to do. 5400 5401 // FIXME: Check for protocols to which the class type is already 5402 // known to conform. 5403 return getObjCObjectType(type, {}, protocols, false); 5404 } 5405 5406 // id<protocol-list> 5407 if (type->isObjCIdType()) { 5408 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5409 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5410 objPtr->isKindOfType()); 5411 return getObjCObjectPointerType(type); 5412 } 5413 5414 // Class<protocol-list> 5415 if (type->isObjCClassType()) { 5416 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5417 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5418 objPtr->isKindOfType()); 5419 return getObjCObjectPointerType(type); 5420 } 5421 5422 hasError = true; 5423 return type; 5424 } 5425 5426 QualType 5427 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5428 ArrayRef<ObjCProtocolDecl *> protocols) const { 5429 // Look in the folding set for an existing type. 5430 llvm::FoldingSetNodeID ID; 5431 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5432 void *InsertPos = nullptr; 5433 if (ObjCTypeParamType *TypeParam = 5434 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5435 return QualType(TypeParam, 0); 5436 5437 // We canonicalize to the underlying type. 5438 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5439 if (!protocols.empty()) { 5440 // Apply the protocol qualifers. 
5441 bool hasError; 5442 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5443 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5444 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5445 } 5446 5447 unsigned size = sizeof(ObjCTypeParamType); 5448 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5449 void *mem = Allocate(size, alignof(ObjCTypeParamType)); 5450 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5451 5452 Types.push_back(newType); 5453 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5454 return QualType(newType, 0); 5455 } 5456 5457 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5458 ObjCTypeParamDecl *New) const { 5459 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5460 // Update TypeForDecl after updating TypeSourceInfo. 5461 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5462 SmallVector<ObjCProtocolDecl *, 8> protocols; 5463 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5464 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5465 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5466 } 5467 5468 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5469 /// protocol list adopt all protocols in QT's qualified-id protocol 5470 /// list. 5471 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5472 ObjCInterfaceDecl *IC) { 5473 if (!QT->isObjCQualifiedIdType()) 5474 return false; 5475 5476 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5477 // If both the right and left sides have qualifiers. 5478 for (auto *Proto : OPT->quals()) { 5479 if (!IC->ClassImplementsProtocol(Proto, false)) 5480 return false; 5481 } 5482 return true; 5483 } 5484 return false; 5485 } 5486 5487 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5488 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5489 /// of protocols. 5490 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5491 ObjCInterfaceDecl *IDecl) { 5492 if (!QT->isObjCQualifiedIdType()) 5493 return false; 5494 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5495 if (!OPT) 5496 return false; 5497 if (!IDecl->hasDefinition()) 5498 return false; 5499 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5500 CollectInheritedProtocols(IDecl, InheritedProtocols); 5501 if (InheritedProtocols.empty()) 5502 return false; 5503 // Check that if every protocol in list of id<plist> conforms to a protocol 5504 // of IDecl's, then bridge casting is ok. 5505 bool Conforms = false; 5506 for (auto *Proto : OPT->quals()) { 5507 Conforms = false; 5508 for (auto *PI : InheritedProtocols) { 5509 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5510 Conforms = true; 5511 break; 5512 } 5513 } 5514 if (!Conforms) 5515 break; 5516 } 5517 if (Conforms) 5518 return true; 5519 5520 for (auto *PI : InheritedProtocols) { 5521 // If both the right and left sides have qualifiers. 5522 bool Adopts = false; 5523 for (auto *Proto : OPT->quals()) { 5524 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5525 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5526 break; 5527 } 5528 if (!Adopts) 5529 return false; 5530 } 5531 return true; 5532 } 5533 5534 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5535 /// the given object type. 
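// Illustrative sketch (ours, not part of Clang): how an interface type is
// wrapped into the familiar 'NSString *'-style pointer type. The helper name
// 'objcPointerExample' and its parameters are assumptions for the example.
#if 0
static void objcPointerExample(ASTContext &Ctx, const ObjCInterfaceDecl *IFace) {
  QualType Object = Ctx.getObjCInterfaceType(IFace);       // 'NSString'
  QualType Pointer = Ctx.getObjCObjectPointerType(Object); // 'NSString *'
  assert(Pointer->isObjCObjectPointerType());
  // The folding set below guarantees that the same object type always maps to
  // the same uniqued pointer type.
  assert(Pointer == Ctx.getObjCObjectPointerType(Object));
}
#endif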
5536 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5537 llvm::FoldingSetNodeID ID; 5538 ObjCObjectPointerType::Profile(ID, ObjectT); 5539 5540 void *InsertPos = nullptr; 5541 if (ObjCObjectPointerType *QT = 5542 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5543 return QualType(QT, 0); 5544 5545 // Find the canonical object type. 5546 QualType Canonical; 5547 if (!ObjectT.isCanonical()) { 5548 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5549 5550 // Regenerate InsertPos. 5551 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5552 } 5553 5554 // No match. 5555 void *Mem = 5556 Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType)); 5557 auto *QType = 5558 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5559 5560 Types.push_back(QType); 5561 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5562 return QualType(QType, 0); 5563 } 5564 5565 /// getObjCInterfaceType - Return the unique reference to the type for the 5566 /// specified ObjC interface decl. The list of protocols is optional. 5567 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5568 ObjCInterfaceDecl *PrevDecl) const { 5569 if (Decl->TypeForDecl) 5570 return QualType(Decl->TypeForDecl, 0); 5571 5572 if (PrevDecl) { 5573 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5574 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5575 return QualType(PrevDecl->TypeForDecl, 0); 5576 } 5577 5578 // Prefer the definition, if there is one. 5579 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5580 Decl = Def; 5581 5582 void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType)); 5583 auto *T = new (Mem) ObjCInterfaceType(Decl); 5584 Decl->TypeForDecl = T; 5585 Types.push_back(T); 5586 return QualType(T, 0); 5587 } 5588 5589 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5590 /// TypeOfExprType AST's (since expression's are never shared). For example, 5591 /// multiple declarations that refer to "typeof(x)" all contain different 5592 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5593 /// on canonical type's (which are always unique). 5594 QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { 5595 TypeOfExprType *toe; 5596 if (tofExpr->isTypeDependent()) { 5597 llvm::FoldingSetNodeID ID; 5598 DependentTypeOfExprType::Profile(ID, *this, tofExpr, 5599 Kind == TypeOfKind::Unqualified); 5600 5601 void *InsertPos = nullptr; 5602 DependentTypeOfExprType *Canon = 5603 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5604 if (Canon) { 5605 // We already have a "canonical" version of an identical, dependent 5606 // typeof(expr) type. Use that as our canonical type. 5607 toe = new (*this, alignof(TypeOfExprType)) 5608 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); 5609 } else { 5610 // Build a new, canonical typeof(expr) type. 5611 Canon = new (*this, alignof(DependentTypeOfExprType)) 5612 DependentTypeOfExprType(tofExpr, Kind); 5613 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5614 toe = Canon; 5615 } 5616 } else { 5617 QualType Canonical = getCanonicalType(tofExpr->getType()); 5618 toe = new (*this, alignof(TypeOfExprType)) 5619 TypeOfExprType(tofExpr, Kind, Canonical); 5620 } 5621 Types.push_back(toe); 5622 return QualType(toe, 0); 5623 } 5624 5625 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5626 /// TypeOfType nodes. 
The only motivation to unique these nodes would be 5627 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5628 /// an issue. This doesn't affect the type checker, since it operates 5629 /// on canonical types (which are always unique). 5630 QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { 5631 QualType Canonical = getCanonicalType(tofType); 5632 auto *tot = 5633 new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind); 5634 Types.push_back(tot); 5635 return QualType(tot, 0); 5636 } 5637 5638 /// getReferenceQualifiedType - Given an expr, will return the type for 5639 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5640 /// and class member access into account. 5641 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5642 // C++11 [dcl.type.simple]p4: 5643 // [...] 5644 QualType T = E->getType(); 5645 switch (E->getValueKind()) { 5646 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5647 // type of e; 5648 case VK_XValue: 5649 return getRValueReferenceType(T); 5650 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5651 // type of e; 5652 case VK_LValue: 5653 return getLValueReferenceType(T); 5654 // - otherwise, decltype(e) is the type of e. 5655 case VK_PRValue: 5656 return T; 5657 } 5658 llvm_unreachable("Unknown value kind"); 5659 } 5660 5661 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5662 /// nodes. This would never be helpful, since each such type has its own 5663 /// expression, and would not give a significant memory saving, since there 5664 /// is an Expr tree under each such type. 5665 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5666 DecltypeType *dt; 5667 5668 // C++11 [temp.type]p2: 5669 // If an expression e involves a template parameter, decltype(e) denotes a 5670 // unique dependent type. Two such decltype-specifiers refer to the same 5671 // type only if their expressions are equivalent (14.5.6.1). 5672 if (e->isInstantiationDependent()) { 5673 llvm::FoldingSetNodeID ID; 5674 DependentDecltypeType::Profile(ID, *this, e); 5675 5676 void *InsertPos = nullptr; 5677 DependentDecltypeType *Canon 5678 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5679 if (!Canon) { 5680 // Build a new, canonical decltype(expr) type. 5681 Canon = new (*this, alignof(DependentDecltypeType)) 5682 DependentDecltypeType(e, DependentTy); 5683 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5684 } 5685 dt = new (*this, alignof(DecltypeType)) 5686 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5687 } else { 5688 dt = new (*this, alignof(DecltypeType)) 5689 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5690 } 5691 Types.push_back(dt); 5692 return QualType(dt, 0); 5693 } 5694 5695 /// getUnaryTransformationType - We don't unique these, since the memory 5696 /// savings are minimal and these are rare. 5697 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5698 QualType UnderlyingType, 5699 UnaryTransformType::UTTKind Kind) 5700 const { 5701 UnaryTransformType *ut = nullptr; 5702 5703 if (BaseType->isDependentType()) { 5704 // Look in the folding set for an existing type. 
5705 llvm::FoldingSetNodeID ID; 5706 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5707 5708 void *InsertPos = nullptr; 5709 DependentUnaryTransformType *Canon 5710 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5711 5712 if (!Canon) { 5713 // Build a new, canonical __underlying_type(type) type. 5714 Canon = new (*this, alignof(DependentUnaryTransformType)) 5715 DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind); 5716 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5717 } 5718 ut = new (*this, alignof(UnaryTransformType)) 5719 UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0)); 5720 } else { 5721 QualType CanonType = getCanonicalType(UnderlyingType); 5722 ut = new (*this, alignof(UnaryTransformType)) 5723 UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType); 5724 } 5725 Types.push_back(ut); 5726 return QualType(ut, 0); 5727 } 5728 5729 QualType ASTContext::getAutoTypeInternal( 5730 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5731 bool IsPack, ConceptDecl *TypeConstraintConcept, 5732 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5733 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5734 !TypeConstraintConcept && !IsDependent) 5735 return getAutoDeductType(); 5736 5737 // Look in the folding set for an existing type. 5738 void *InsertPos = nullptr; 5739 llvm::FoldingSetNodeID ID; 5740 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5741 TypeConstraintConcept, TypeConstraintArgs); 5742 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5743 return QualType(AT, 0); 5744 5745 QualType Canon; 5746 if (!IsCanon) { 5747 if (!DeducedType.isNull()) { 5748 Canon = DeducedType.getCanonicalType(); 5749 } else if (TypeConstraintConcept) { 5750 bool AnyNonCanonArgs = false; 5751 ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl(); 5752 auto CanonicalConceptArgs = ::getCanonicalTemplateArguments( 5753 *this, TypeConstraintArgs, AnyNonCanonArgs); 5754 if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) { 5755 Canon = 5756 getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5757 CanonicalConcept, CanonicalConceptArgs, true); 5758 // Find the insert position again. 5759 [[maybe_unused]] auto *Nothing = 5760 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5761 assert(!Nothing && "canonical type broken"); 5762 } 5763 } 5764 } 5765 5766 void *Mem = Allocate(sizeof(AutoType) + 5767 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5768 alignof(AutoType)); 5769 auto *AT = new (Mem) AutoType( 5770 DeducedType, Keyword, 5771 (IsDependent ? TypeDependence::DependentInstantiation 5772 : TypeDependence::None) | 5773 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5774 Canon, TypeConstraintConcept, TypeConstraintArgs); 5775 Types.push_back(AT); 5776 AutoTypes.InsertNode(AT, InsertPos); 5777 return QualType(AT, 0); 5778 } 5779 5780 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5781 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5782 /// canonical deduced-but-dependent 'auto' type. 
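// Illustrative sketch (ours, not part of Clang): the undeduced pattern versus
// a deduced 'auto'. The helper name 'autoExample' is an assumption.
#if 0
static void autoExample(ASTContext &Ctx) {
  // The canonical undeduced 'auto' used as a deduction pattern.
  QualType Undeduced = Ctx.getAutoDeductType();
  assert(!Undeduced->getAs<AutoType>()->isDeduced());

  // 'auto' deduced to 'int': unconstrained, non-dependent, not a pack.
  QualType Deduced =
      Ctx.getAutoType(Ctx.IntTy, AutoTypeKeyword::Auto, /*IsDependent=*/false,
                      /*IsPack=*/false, /*TypeConstraintConcept=*/nullptr,
                      /*TypeConstraintArgs=*/{});
  assert(Ctx.hasSameType(Deduced, Ctx.IntTy)); // canonically just 'int'
}
#endif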
5783 QualType 5784 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5785 bool IsDependent, bool IsPack, 5786 ConceptDecl *TypeConstraintConcept, 5787 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5788 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5789 assert((!IsDependent || DeducedType.isNull()) && 5790 "A dependent auto should be undeduced"); 5791 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5792 TypeConstraintConcept, TypeConstraintArgs); 5793 } 5794 5795 QualType ASTContext::getUnconstrainedType(QualType T) const { 5796 QualType CanonT = T.getCanonicalType(); 5797 5798 // Remove a type-constraint from a top-level auto or decltype(auto). 5799 if (auto *AT = CanonT->getAs<AutoType>()) { 5800 if (!AT->isConstrained()) 5801 return T; 5802 return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false, 5803 AT->containsUnexpandedParameterPack()), 5804 T.getQualifiers()); 5805 } 5806 5807 // FIXME: We only support constrained auto at the top level in the type of a 5808 // non-type template parameter at the moment. Once we lift that restriction, 5809 // we'll need to recursively build types containing auto here. 5810 assert(!CanonT->getContainedAutoType() || 5811 !CanonT->getContainedAutoType()->isConstrained()); 5812 return T; 5813 } 5814 5815 /// Return the uniqued reference to the deduced template specialization type 5816 /// which has been deduced to the given type, or to the canonical undeduced 5817 /// such type, or the canonical deduced-but-dependent such type. 5818 QualType ASTContext::getDeducedTemplateSpecializationType( 5819 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5820 // Look in the folding set for an existing type. 5821 void *InsertPos = nullptr; 5822 llvm::FoldingSetNodeID ID; 5823 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5824 IsDependent); 5825 if (DeducedTemplateSpecializationType *DTST = 5826 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5827 return QualType(DTST, 0); 5828 5829 auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType)) 5830 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5831 llvm::FoldingSetNodeID TempID; 5832 DTST->Profile(TempID); 5833 assert(ID == TempID && "ID does not match"); 5834 Types.push_back(DTST); 5835 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5836 return QualType(DTST, 0); 5837 } 5838 5839 /// getAtomicType - Return the uniqued reference to the atomic type for 5840 /// the given value type. 5841 QualType ASTContext::getAtomicType(QualType T) const { 5842 // Unique pointers, to guarantee there is only one pointer of a particular 5843 // structure. 5844 llvm::FoldingSetNodeID ID; 5845 AtomicType::Profile(ID, T); 5846 5847 void *InsertPos = nullptr; 5848 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5849 return QualType(AT, 0); 5850 5851 // If the atomic value type isn't canonical, this won't be a canonical type 5852 // either, so fill in the canonical type field. 5853 QualType Canonical; 5854 if (!T.isCanonical()) { 5855 Canonical = getAtomicType(getCanonicalType(T)); 5856 5857 // Get the new insert position for the node we care about. 
5858 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5859 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5860 } 5861 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical); 5862 Types.push_back(New); 5863 AtomicTypes.InsertNode(New, InsertPos); 5864 return QualType(New, 0); 5865 } 5866 5867 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5868 QualType ASTContext::getAutoDeductType() const { 5869 if (AutoDeductTy.isNull()) 5870 AutoDeductTy = QualType(new (*this, alignof(AutoType)) 5871 AutoType(QualType(), AutoTypeKeyword::Auto, 5872 TypeDependence::None, QualType(), 5873 /*concept*/ nullptr, /*args*/ {}), 5874 0); 5875 return AutoDeductTy; 5876 } 5877 5878 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5879 QualType ASTContext::getAutoRRefDeductType() const { 5880 if (AutoRRefDeductTy.isNull()) 5881 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5882 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5883 return AutoRRefDeductTy; 5884 } 5885 5886 /// getTagDeclType - Return the unique reference to the type for the 5887 /// specified TagDecl (struct/union/class/enum) decl. 5888 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5889 assert(Decl); 5890 // FIXME: What is the design on getTagDeclType when it requires casting 5891 // away const? mutable? 5892 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5893 } 5894 5895 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5896 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5897 /// needs to agree with the definition in <stddef.h>. 5898 CanQualType ASTContext::getSizeType() const { 5899 return getFromTargetType(Target->getSizeType()); 5900 } 5901 5902 /// Return the unique signed counterpart of the integer type 5903 /// corresponding to size_t. 5904 CanQualType ASTContext::getSignedSizeType() const { 5905 return getFromTargetType(Target->getSignedSizeType()); 5906 } 5907 5908 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5909 CanQualType ASTContext::getIntMaxType() const { 5910 return getFromTargetType(Target->getIntMaxType()); 5911 } 5912 5913 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5914 CanQualType ASTContext::getUIntMaxType() const { 5915 return getFromTargetType(Target->getUIntMaxType()); 5916 } 5917 5918 /// getSignedWCharType - Return the type of "signed wchar_t". 5919 /// Used when in C++, as a GCC extension. 5920 QualType ASTContext::getSignedWCharType() const { 5921 // FIXME: derive from "Target" ? 5922 return WCharTy; 5923 } 5924 5925 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5926 /// Used when in C++, as a GCC extension. 5927 QualType ASTContext::getUnsignedWCharType() const { 5928 // FIXME: derive from "Target" ? 5929 return UnsignedIntTy; 5930 } 5931 5932 QualType ASTContext::getIntPtrType() const { 5933 return getFromTargetType(Target->getIntPtrType()); 5934 } 5935 5936 QualType ASTContext::getUIntPtrType() const { 5937 return getCorrespondingUnsignedType(getIntPtrType()); 5938 } 5939 5940 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5941 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 
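// Illustrative sketch (ours, not part of Clang): all of the <stddef.h>-style
// typedefs in this block are derived from TargetInfo, so they agree with the
// target's C library. The helper name 'stddefTypesExample' is an assumption.
#if 0
static void stddefTypesExample(ASTContext &Ctx) {
  CanQualType SizeT = Ctx.getSizeType();        // e.g. 'unsigned long' on LP64
  QualType PtrDiffT = Ctx.getPointerDiffType(); // e.g. 'long' on LP64
  // Widths come straight from the target description.
  (void)Ctx.getTypeSize(SizeT);
  (void)Ctx.getTypeSize(PtrDiffT);
}
#endif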
5942 QualType ASTContext::getPointerDiffType() const { 5943 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 5944 } 5945 5946 /// Return the unique unsigned counterpart of "ptrdiff_t" 5947 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5948 /// in the definition of %tu format specifier. 5949 QualType ASTContext::getUnsignedPointerDiffType() const { 5950 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 5951 } 5952 5953 /// Return the unique type for "pid_t" defined in 5954 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5955 QualType ASTContext::getProcessIDType() const { 5956 return getFromTargetType(Target->getProcessIDType()); 5957 } 5958 5959 //===----------------------------------------------------------------------===// 5960 // Type Operators 5961 //===----------------------------------------------------------------------===// 5962 5963 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5964 // Push qualifiers into arrays, and then discard any remaining 5965 // qualifiers. 5966 T = getCanonicalType(T); 5967 T = getVariableArrayDecayedType(T); 5968 const Type *Ty = T.getTypePtr(); 5969 QualType Result; 5970 if (isa<ArrayType>(Ty)) { 5971 Result = getArrayDecayedType(QualType(Ty,0)); 5972 } else if (isa<FunctionType>(Ty)) { 5973 Result = getPointerType(QualType(Ty, 0)); 5974 } else { 5975 Result = QualType(Ty, 0); 5976 } 5977 5978 return CanQualType::CreateUnsafe(Result); 5979 } 5980 5981 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5982 Qualifiers &quals) { 5983 SplitQualType splitType = type.getSplitUnqualifiedType(); 5984 5985 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5986 // the unqualified desugared type and then drops it on the floor. 5987 // We then have to strip that sugar back off with 5988 // getUnqualifiedDesugaredType(), which is silly. 5989 const auto *AT = 5990 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5991 5992 // If we don't have an array, just use the results in splitType. 5993 if (!AT) { 5994 quals = splitType.Quals; 5995 return QualType(splitType.Ty, 0); 5996 } 5997 5998 // Otherwise, recurse on the array's element type. 5999 QualType elementType = AT->getElementType(); 6000 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 6001 6002 // If that didn't change the element type, AT has no qualifiers, so we 6003 // can just use the results in splitType. 6004 if (elementType == unqualElementType) { 6005 assert(quals.empty()); // from the recursive call 6006 quals = splitType.Quals; 6007 return QualType(splitType.Ty, 0); 6008 } 6009 6010 // Otherwise, add in the qualifiers from the outermost type, then 6011 // build the type back up. 
6012 quals.addConsistentQualifiers(splitType.Quals); 6013 6014 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 6015 return getConstantArrayType(unqualElementType, CAT->getSize(), 6016 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 6017 } 6018 6019 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 6020 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 6021 } 6022 6023 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 6024 return getVariableArrayType(unqualElementType, 6025 VAT->getSizeExpr(), 6026 VAT->getSizeModifier(), 6027 VAT->getIndexTypeCVRQualifiers(), 6028 VAT->getBracketsRange()); 6029 } 6030 6031 const auto *DSAT = cast<DependentSizedArrayType>(AT); 6032 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 6033 DSAT->getSizeModifier(), 0, 6034 SourceRange()); 6035 } 6036 6037 /// Attempt to unwrap two types that may both be array types with the same bound 6038 /// (or both be array types of unknown bound) for the purpose of comparing the 6039 /// cv-decomposition of two types per C++ [conv.qual]. 6040 /// 6041 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6042 /// C++20 [conv.qual], if permitted by the current language mode. 6043 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 6044 bool AllowPiMismatch) { 6045 while (true) { 6046 auto *AT1 = getAsArrayType(T1); 6047 if (!AT1) 6048 return; 6049 6050 auto *AT2 = getAsArrayType(T2); 6051 if (!AT2) 6052 return; 6053 6054 // If we don't have two array types with the same constant bound nor two 6055 // incomplete array types, we've unwrapped everything we can. 6056 // C++20 also permits one type to be a constant array type and the other 6057 // to be an incomplete array type. 6058 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6059 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6060 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6061 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6062 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6063 isa<IncompleteArrayType>(AT2)))) 6064 return; 6065 } else if (isa<IncompleteArrayType>(AT1)) { 6066 if (!(isa<IncompleteArrayType>(AT2) || 6067 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6068 isa<ConstantArrayType>(AT2)))) 6069 return; 6070 } else { 6071 return; 6072 } 6073 6074 T1 = AT1->getElementType(); 6075 T2 = AT2->getElementType(); 6076 } 6077 } 6078 6079 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 6080 /// 6081 /// If T1 and T2 are both pointer types of the same kind, or both array types 6082 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6083 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6084 /// 6085 /// This function will typically be called in a loop that successively 6086 /// "unwraps" pointer and pointer-to-member types to compare them at each 6087 /// level. 6088 /// 6089 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6090 /// C++20 [conv.qual], if permitted by the current language mode. 6091 /// 6092 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6093 /// pair of types that can't be unwrapped further. 
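// Worked example (ours, not from this file): the decomposition implemented
// below is what makes the classic qualification conversion legal:
#if 0
void qualificationConversionExample(int **PP) {
  // 'int **' and 'const int *const *' are similar; adding 'const' at an inner
  // level is allowed because every enclosing level is also 'const'.
  const int *const *CPP = PP;
  (void)CPP;
}
#endif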
6094 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6095 bool AllowPiMismatch) { 6096 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6097 6098 const auto *T1PtrType = T1->getAs<PointerType>(); 6099 const auto *T2PtrType = T2->getAs<PointerType>(); 6100 if (T1PtrType && T2PtrType) { 6101 T1 = T1PtrType->getPointeeType(); 6102 T2 = T2PtrType->getPointeeType(); 6103 return true; 6104 } 6105 6106 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6107 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6108 if (T1MPType && T2MPType && 6109 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6110 QualType(T2MPType->getClass(), 0))) { 6111 T1 = T1MPType->getPointeeType(); 6112 T2 = T2MPType->getPointeeType(); 6113 return true; 6114 } 6115 6116 if (getLangOpts().ObjC) { 6117 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6118 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6119 if (T1OPType && T2OPType) { 6120 T1 = T1OPType->getPointeeType(); 6121 T2 = T2OPType->getPointeeType(); 6122 return true; 6123 } 6124 } 6125 6126 // FIXME: Block pointers, too? 6127 6128 return false; 6129 } 6130 6131 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6132 while (true) { 6133 Qualifiers Quals; 6134 T1 = getUnqualifiedArrayType(T1, Quals); 6135 T2 = getUnqualifiedArrayType(T2, Quals); 6136 if (hasSameType(T1, T2)) 6137 return true; 6138 if (!UnwrapSimilarTypes(T1, T2)) 6139 return false; 6140 } 6141 } 6142 6143 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6144 while (true) { 6145 Qualifiers Quals1, Quals2; 6146 T1 = getUnqualifiedArrayType(T1, Quals1); 6147 T2 = getUnqualifiedArrayType(T2, Quals2); 6148 6149 Quals1.removeCVRQualifiers(); 6150 Quals2.removeCVRQualifiers(); 6151 if (Quals1 != Quals2) 6152 return false; 6153 6154 if (hasSameType(T1, T2)) 6155 return true; 6156 6157 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6158 return false; 6159 } 6160 } 6161 6162 DeclarationNameInfo 6163 ASTContext::getNameForTemplate(TemplateName Name, 6164 SourceLocation NameLoc) const { 6165 switch (Name.getKind()) { 6166 case TemplateName::QualifiedTemplate: 6167 case TemplateName::Template: 6168 // DNInfo work in progress: CHECKME: what about DNLoc? 6169 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6170 NameLoc); 6171 6172 case TemplateName::OverloadedTemplate: { 6173 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6174 // DNInfo work in progress: CHECKME: what about DNLoc? 6175 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6176 } 6177 6178 case TemplateName::AssumedTemplate: { 6179 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6180 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6181 } 6182 6183 case TemplateName::DependentTemplate: { 6184 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6185 DeclarationName DName; 6186 if (DTN->isIdentifier()) { 6187 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6188 return DeclarationNameInfo(DName, NameLoc); 6189 } else { 6190 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6191 // DNInfo work in progress: FIXME: source locations? 
6192 DeclarationNameLoc DNLoc = 6193 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6194 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6195 } 6196 } 6197 6198 case TemplateName::SubstTemplateTemplateParm: { 6199 SubstTemplateTemplateParmStorage *subst 6200 = Name.getAsSubstTemplateTemplateParm(); 6201 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6202 NameLoc); 6203 } 6204 6205 case TemplateName::SubstTemplateTemplateParmPack: { 6206 SubstTemplateTemplateParmPackStorage *subst 6207 = Name.getAsSubstTemplateTemplateParmPack(); 6208 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6209 NameLoc); 6210 } 6211 case TemplateName::UsingTemplate: 6212 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6213 NameLoc); 6214 } 6215 6216 llvm_unreachable("bad template name kind!"); 6217 } 6218 6219 TemplateName 6220 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6221 switch (Name.getKind()) { 6222 case TemplateName::UsingTemplate: 6223 case TemplateName::QualifiedTemplate: 6224 case TemplateName::Template: { 6225 TemplateDecl *Template = Name.getAsTemplateDecl(); 6226 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6227 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6228 6229 // The canonical template name is the canonical template declaration. 6230 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6231 } 6232 6233 case TemplateName::OverloadedTemplate: 6234 case TemplateName::AssumedTemplate: 6235 llvm_unreachable("cannot canonicalize unresolved template"); 6236 6237 case TemplateName::DependentTemplate: { 6238 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6239 assert(DTN && "Non-dependent template names must refer to template decls."); 6240 return DTN->CanonicalTemplateName; 6241 } 6242 6243 case TemplateName::SubstTemplateTemplateParm: { 6244 SubstTemplateTemplateParmStorage *subst 6245 = Name.getAsSubstTemplateTemplateParm(); 6246 return getCanonicalTemplateName(subst->getReplacement()); 6247 } 6248 6249 case TemplateName::SubstTemplateTemplateParmPack: { 6250 SubstTemplateTemplateParmPackStorage *subst = 6251 Name.getAsSubstTemplateTemplateParmPack(); 6252 TemplateArgument canonArgPack = 6253 getCanonicalTemplateArgument(subst->getArgumentPack()); 6254 return getSubstTemplateTemplateParmPack( 6255 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(), 6256 subst->getFinal(), subst->getIndex()); 6257 } 6258 } 6259 6260 llvm_unreachable("bad template name!"); 6261 } 6262 6263 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6264 const TemplateName &Y) const { 6265 return getCanonicalTemplateName(X).getAsVoidPointer() == 6266 getCanonicalTemplateName(Y).getAsVoidPointer(); 6267 } 6268 6269 bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { 6270 if (!XCE != !YCE) 6271 return false; 6272 6273 if (!XCE) 6274 return true; 6275 6276 llvm::FoldingSetNodeID XCEID, YCEID; 6277 XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6278 YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6279 return XCEID == YCEID; 6280 } 6281 6282 bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, 6283 const TypeConstraint *YTC) const { 6284 if (!XTC != !YTC) 6285 return false; 6286 6287 if (!XTC) 6288 return true; 6289 6290 auto *NCX = XTC->getNamedConcept(); 6291 auto *NCY = YTC->getNamedConcept(); 6292 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6293 return 
false; 6294 if (XTC->getConceptReference()->hasExplicitTemplateArgs() != 6295 YTC->getConceptReference()->hasExplicitTemplateArgs()) 6296 return false; 6297 if (XTC->getConceptReference()->hasExplicitTemplateArgs()) 6298 if (XTC->getConceptReference() 6299 ->getTemplateArgsAsWritten() 6300 ->NumTemplateArgs != 6301 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs) 6302 return false; 6303 6304 // Compare slowly by profiling. 6305 // 6306 // We couldn't compare the profiling result for the template 6307 // args here. Consider the following example in different modules: 6308 // 6309 // template <__integer_like _Tp, C<_Tp> Sentinel> 6310 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const { 6311 // return __t; 6312 // } 6313 // 6314 // When we compare the profiling result for `C<_Tp>` in different 6315 // modules, it will compare the type of `_Tp` in different modules. 6316 // However, the type of `_Tp` in different modules refer to different 6317 // types here naturally. So we couldn't compare the profiling result 6318 // for the template args directly. 6319 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), 6320 YTC->getImmediatelyDeclaredConstraint()); 6321 } 6322 6323 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6324 const NamedDecl *Y) const { 6325 if (X->getKind() != Y->getKind()) 6326 return false; 6327 6328 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6329 auto *TY = cast<TemplateTypeParmDecl>(Y); 6330 if (TX->isParameterPack() != TY->isParameterPack()) 6331 return false; 6332 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6333 return false; 6334 return isSameTypeConstraint(TX->getTypeConstraint(), 6335 TY->getTypeConstraint()); 6336 } 6337 6338 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6339 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6340 return TX->isParameterPack() == TY->isParameterPack() && 6341 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) && 6342 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(), 6343 TY->getPlaceholderTypeConstraint()); 6344 } 6345 6346 auto *TX = cast<TemplateTemplateParmDecl>(X); 6347 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6348 return TX->isParameterPack() == TY->isParameterPack() && 6349 isSameTemplateParameterList(TX->getTemplateParameters(), 6350 TY->getTemplateParameters()); 6351 } 6352 6353 bool ASTContext::isSameTemplateParameterList( 6354 const TemplateParameterList *X, const TemplateParameterList *Y) const { 6355 if (X->size() != Y->size()) 6356 return false; 6357 6358 for (unsigned I = 0, N = X->size(); I != N; ++I) 6359 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6360 return false; 6361 6362 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); 6363 } 6364 6365 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, 6366 const NamedDecl *Y) const { 6367 // If the type parameter isn't the same already, we don't need to check the 6368 // default argument further. 
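// Illustrative sketch (hypothetical declarations):
//   template <class T = int> struct A;
// seen in two modules compares equal here, whereas a redeclaration
// written with 'template <class T = long>' does not, because the default
// arguments are different types.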
6369 if (!isSameTemplateParameter(X, Y)) 6370 return false; 6371 6372 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { 6373 auto *TTPY = cast<TemplateTypeParmDecl>(Y); 6374 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6375 return false; 6376 6377 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); 6378 } 6379 6380 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6381 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); 6382 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) 6383 return false; 6384 6385 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); 6386 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); 6387 llvm::FoldingSetNodeID XID, YID; 6388 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); 6389 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); 6390 return XID == YID; 6391 } 6392 6393 auto *TTPX = cast<TemplateTemplateParmDecl>(X); 6394 auto *TTPY = cast<TemplateTemplateParmDecl>(Y); 6395 6396 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6397 return false; 6398 6399 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); 6400 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); 6401 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); 6402 } 6403 6404 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6405 if (auto *NS = X->getAsNamespace()) 6406 return NS; 6407 if (auto *NAS = X->getAsNamespaceAlias()) 6408 return NAS->getNamespace(); 6409 return nullptr; 6410 } 6411 6412 static bool isSameQualifier(const NestedNameSpecifier *X, 6413 const NestedNameSpecifier *Y) { 6414 if (auto *NSX = getNamespace(X)) { 6415 auto *NSY = getNamespace(Y); 6416 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6417 return false; 6418 } else if (X->getKind() != Y->getKind()) 6419 return false; 6420 6421 // FIXME: For namespaces and types, we're permitted to check that the entity 6422 // is named via the same tokens. We should probably do so. 6423 switch (X->getKind()) { 6424 case NestedNameSpecifier::Identifier: 6425 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6426 return false; 6427 break; 6428 case NestedNameSpecifier::Namespace: 6429 case NestedNameSpecifier::NamespaceAlias: 6430 // We've already checked that we named the same namespace. 6431 break; 6432 case NestedNameSpecifier::TypeSpec: 6433 case NestedNameSpecifier::TypeSpecWithTemplate: 6434 if (X->getAsType()->getCanonicalTypeInternal() != 6435 Y->getAsType()->getCanonicalTypeInternal()) 6436 return false; 6437 break; 6438 case NestedNameSpecifier::Global: 6439 case NestedNameSpecifier::Super: 6440 return true; 6441 } 6442 6443 // Recurse into earlier portion of NNS, if any. 6444 auto *PX = X->getPrefix(); 6445 auto *PY = Y->getPrefix(); 6446 if (PX && PY) 6447 return isSameQualifier(PX, PY); 6448 return !PX && !PY; 6449 } 6450 6451 /// Determine whether the attributes we can overload on are identical for A and 6452 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6453 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6454 const FunctionDecl *B) { 6455 // Note that pass_object_size attributes are represented in the function's 6456 // ExtParameterInfo, so we don't need to check them here. 
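// Illustrative sketch (hypothetical declarations): two declarations of
//   void f(int n) __attribute__((enable_if(n > 0, "")));
// have identical enable_if conditions and compare equal here, while a
// declaration whose condition is 'n >= 0', or one carrying an extra
// enable_if attribute, does not.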
6457 6458 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6459 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6460 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6461 6462 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6463 std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6464 std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6465 6466 // Return false if the number of enable_if attributes is different. 6467 if (!Cand1A || !Cand2A) 6468 return false; 6469 6470 Cand1ID.clear(); 6471 Cand2ID.clear(); 6472 6473 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6474 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6475 6476 // Return false if any of the enable_if expressions of A and B are 6477 // different. 6478 if (Cand1ID != Cand2ID) 6479 return false; 6480 } 6481 return true; 6482 } 6483 6484 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { 6485 // Caution: this function is called by the AST reader during deserialization, 6486 // so it cannot rely on AST invariants being met. Non-trivial accessors 6487 // should be avoided, along with any traversal of redeclaration chains. 6488 6489 if (X == Y) 6490 return true; 6491 6492 if (X->getDeclName() != Y->getDeclName()) 6493 return false; 6494 6495 // Must be in the same context. 6496 // 6497 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6498 // could be two different declarations of the same function. (We will fix the 6499 // semantic DC to refer to the primary definition after merging.) 6500 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6501 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6502 return false; 6503 6504 // Two typedefs refer to the same entity if they have the same underlying 6505 // type. 6506 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6507 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6508 return hasSameType(TypedefX->getUnderlyingType(), 6509 TypedefY->getUnderlyingType()); 6510 6511 // Must have the same kind. 6512 if (X->getKind() != Y->getKind()) 6513 return false; 6514 6515 // Objective-C classes and protocols with the same name always match. 6516 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6517 return true; 6518 6519 if (isa<ClassTemplateSpecializationDecl>(X)) { 6520 // No need to handle these here: we merge them when adding them to the 6521 // template. 6522 return false; 6523 } 6524 6525 // Compatible tags match. 6526 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6527 const auto *TagY = cast<TagDecl>(Y); 6528 return (TagX->getTagKind() == TagY->getTagKind()) || 6529 ((TagX->getTagKind() == TagTypeKind::Struct || 6530 TagX->getTagKind() == TagTypeKind::Class || 6531 TagX->getTagKind() == TagTypeKind::Interface) && 6532 (TagY->getTagKind() == TagTypeKind::Struct || 6533 TagY->getTagKind() == TagTypeKind::Class || 6534 TagY->getTagKind() == TagTypeKind::Interface)); 6535 } 6536 6537 // Functions with the same type and linkage match. 6538 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6539 // functions, etc. 
6540 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
6541 const auto *FuncY = cast<FunctionDecl>(Y);
6542 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
6543 const auto *CtorY = cast<CXXConstructorDecl>(Y);
6544 if (CtorX->getInheritedConstructor() &&
6545 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
6546 CtorY->getInheritedConstructor().getConstructor()))
6547 return false;
6548 }
6549
6550 if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
6551 return false;
6552
6553 // Multiversioned functions with different feature strings are represented
6554 // as separate declarations.
6555 if (FuncX->isMultiVersion()) {
6556 const auto *TAX = FuncX->getAttr<TargetAttr>();
6557 const auto *TAY = FuncY->getAttr<TargetAttr>();
6558 assert(TAX && TAY && "Multiversion Function without target attribute");
6559
6560 if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
6561 return false;
6562 }
6563
6564 // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
6565 // not the same entity if they are constrained.
6566 if ((FuncX->isMemberLikeConstrainedFriend() ||
6567 FuncY->isMemberLikeConstrainedFriend()) &&
6568 !FuncX->getLexicalDeclContext()->Equals(
6569 FuncY->getLexicalDeclContext())) {
6570 return false;
6571 }
6572
6573 if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
6574 FuncY->getTrailingRequiresClause()))
6575 return false;
6576
6577 auto GetTypeAsWritten = [](const FunctionDecl *FD) {
6578 // Map to the first declaration that we've already merged into this one.
6579 // The TSI of redeclarations might not match (due to calling conventions
6580 // being inherited onto the type but not the TSI), but the TSI type of
6581 // the first declaration of the function should match across modules.
6582 FD = FD->getCanonicalDecl();
6583 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
6584 : FD->getType();
6585 };
6586 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
6587 if (!hasSameType(XT, YT)) {
6588 // We can get functions with different types on the redecl chain in C++17
6589 // if they have differing exception specifications and at least one of
6590 // the exception specs is unresolved.
6591 auto *XFPT = XT->getAs<FunctionProtoType>();
6592 auto *YFPT = YT->getAs<FunctionProtoType>();
6593 if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
6594 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
6595 isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
6596 hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
6597 return true;
6598 return false;
6599 }
6600
6601 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
6602 hasSameOverloadableAttrs(FuncX, FuncY);
6603 }
6604
6605 // Variables with the same type and linkage match.
6606 if (const auto *VarX = dyn_cast<VarDecl>(X)) {
6607 const auto *VarY = cast<VarDecl>(Y);
6608 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
6609 // During deserialization, we might compare variables before we load
6610 // their types. Assume the types will end up being the same.
6611 if (VarX->getType().isNull() || VarY->getType().isNull())
6612 return true;
6613
6614 if (hasSameType(VarX->getType(), VarY->getType()))
6615 return true;
6616
6617 // We can get decls with different types on the redecl chain. E.g.
6618 // template <typename T> struct S { static T Var[]; }; // #1
6619 // template <typename T> T S<T>::Var[sizeof(T)]; // #2
6620 // This only happens when completing an incomplete array type.
In this case 6621 // when comparing #1 and #2 we should go through their element type. 6622 const ArrayType *VarXTy = getAsArrayType(VarX->getType()); 6623 const ArrayType *VarYTy = getAsArrayType(VarY->getType()); 6624 if (!VarXTy || !VarYTy) 6625 return false; 6626 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType()) 6627 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType()); 6628 } 6629 return false; 6630 } 6631 6632 // Namespaces with the same name and inlinedness match. 6633 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) { 6634 const auto *NamespaceY = cast<NamespaceDecl>(Y); 6635 return NamespaceX->isInline() == NamespaceY->isInline(); 6636 } 6637 6638 // Identical template names and kinds match if their template parameter lists 6639 // and patterns match. 6640 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) { 6641 const auto *TemplateY = cast<TemplateDecl>(Y); 6642 6643 // ConceptDecl wouldn't be the same if their constraint expression differs. 6644 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) { 6645 const auto *ConceptY = cast<ConceptDecl>(Y); 6646 if (!isSameConstraintExpr(ConceptX->getConstraintExpr(), 6647 ConceptY->getConstraintExpr())) 6648 return false; 6649 } 6650 6651 return isSameEntity(TemplateX->getTemplatedDecl(), 6652 TemplateY->getTemplatedDecl()) && 6653 isSameTemplateParameterList(TemplateX->getTemplateParameters(), 6654 TemplateY->getTemplateParameters()); 6655 } 6656 6657 // Fields with the same name and the same type match. 6658 if (const auto *FDX = dyn_cast<FieldDecl>(X)) { 6659 const auto *FDY = cast<FieldDecl>(Y); 6660 // FIXME: Also check the bitwidth is odr-equivalent, if any. 6661 return hasSameType(FDX->getType(), FDY->getType()); 6662 } 6663 6664 // Indirect fields with the same target field match. 6665 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6666 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6667 return IFDX->getAnonField()->getCanonicalDecl() == 6668 IFDY->getAnonField()->getCanonicalDecl(); 6669 } 6670 6671 // Enumerators with the same name match. 6672 if (isa<EnumConstantDecl>(X)) 6673 // FIXME: Also check the value is odr-equivalent. 6674 return true; 6675 6676 // Using shadow declarations with the same target match. 6677 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6678 const auto *USY = cast<UsingShadowDecl>(Y); 6679 return USX->getTargetDecl() == USY->getTargetDecl(); 6680 } 6681 6682 // Using declarations with the same qualifier match. (We already know that 6683 // the name matches.) 6684 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6685 const auto *UY = cast<UsingDecl>(Y); 6686 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6687 UX->hasTypename() == UY->hasTypename() && 6688 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6689 } 6690 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6691 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6692 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6693 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6694 } 6695 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6696 return isSameQualifier( 6697 UX->getQualifier(), 6698 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6699 } 6700 6701 // Using-pack declarations are only created by instantiation, and match if 6702 // they're instantiated from matching UnresolvedUsing...Decls. 
6703 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6704 return declaresSameEntity( 6705 UX->getInstantiatedFromUsingDecl(), 6706 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6707 } 6708 6709 // Namespace alias definitions with the same target match. 6710 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) { 6711 const auto *NAY = cast<NamespaceAliasDecl>(Y); 6712 return NAX->getNamespace()->Equals(NAY->getNamespace()); 6713 } 6714 6715 return false; 6716 } 6717 6718 TemplateArgument 6719 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6720 switch (Arg.getKind()) { 6721 case TemplateArgument::Null: 6722 return Arg; 6723 6724 case TemplateArgument::Expression: 6725 return Arg; 6726 6727 case TemplateArgument::Declaration: { 6728 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6729 return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()), 6730 Arg.getIsDefaulted()); 6731 } 6732 6733 case TemplateArgument::NullPtr: 6734 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6735 /*isNullPtr*/ true, Arg.getIsDefaulted()); 6736 6737 case TemplateArgument::Template: 6738 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()), 6739 Arg.getIsDefaulted()); 6740 6741 case TemplateArgument::TemplateExpansion: 6742 return TemplateArgument( 6743 getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()), 6744 Arg.getNumTemplateExpansions(), Arg.getIsDefaulted()); 6745 6746 case TemplateArgument::Integral: 6747 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6748 6749 case TemplateArgument::Type: 6750 return TemplateArgument(getCanonicalType(Arg.getAsType()), 6751 /*isNullPtr*/ false, Arg.getIsDefaulted()); 6752 6753 case TemplateArgument::Pack: { 6754 bool AnyNonCanonArgs = false; 6755 auto CanonArgs = ::getCanonicalTemplateArguments( 6756 *this, Arg.pack_elements(), AnyNonCanonArgs); 6757 if (!AnyNonCanonArgs) 6758 return Arg; 6759 return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), 6760 CanonArgs); 6761 } 6762 } 6763 6764 // Silence GCC warning 6765 llvm_unreachable("Unhandled template argument kind"); 6766 } 6767 6768 NestedNameSpecifier * 6769 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6770 if (!NNS) 6771 return nullptr; 6772 6773 switch (NNS->getKind()) { 6774 case NestedNameSpecifier::Identifier: 6775 // Canonicalize the prefix but keep the identifier the same. 6776 return NestedNameSpecifier::Create(*this, 6777 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6778 NNS->getAsIdentifier()); 6779 6780 case NestedNameSpecifier::Namespace: 6781 // A namespace is canonical; build a nested-name-specifier with 6782 // this namespace and no prefix. 6783 return NestedNameSpecifier::Create(*this, nullptr, 6784 NNS->getAsNamespace()->getOriginalNamespace()); 6785 6786 case NestedNameSpecifier::NamespaceAlias: 6787 // A namespace is canonical; build a nested-name-specifier with 6788 // this namespace and no prefix. 6789 return NestedNameSpecifier::Create(*this, nullptr, 6790 NNS->getAsNamespaceAlias()->getNamespace() 6791 ->getOriginalNamespace()); 6792 6793 // The difference between TypeSpec and TypeSpecWithTemplate is that the 6794 // latter will have the 'template' keyword when printed. 
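// Illustrative sketch: a specifier written 'T::type::' is a TypeSpec,
// while one written 'T::template apply<U>::' is a TypeSpecWithTemplate.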
6795 case NestedNameSpecifier::TypeSpec:
6796 case NestedNameSpecifier::TypeSpecWithTemplate: {
6797 const Type *T = getCanonicalType(NNS->getAsType());
6798
6799 // If we have some kind of dependent-named type (e.g., "typename T::type"),
6800 // break it apart into its prefix and identifier, then reconstitute those
6801 // as the canonical nested-name-specifier. This is required to canonicalize
6802 // a dependent nested-name-specifier involving typedefs of dependent-name
6803 // types, e.g.,
6804 // typedef typename T::type T1;
6805 // typedef typename T1::type T2;
6806 if (const auto *DNT = T->getAs<DependentNameType>())
6807 return NestedNameSpecifier::Create(
6808 *this, DNT->getQualifier(),
6809 const_cast<IdentifierInfo *>(DNT->getIdentifier()));
6810 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
6811 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
6812 const_cast<Type *>(T));
6813
6814 // TODO: Set 'Template' parameter to true for other template types.
6815 return NestedNameSpecifier::Create(*this, nullptr, false,
6816 const_cast<Type *>(T));
6817 }
6818
6819 case NestedNameSpecifier::Global:
6820 case NestedNameSpecifier::Super:
6821 // The global specifier and __super specifier are canonical and unique.
6822 return NNS;
6823 }
6824
6825 llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
6826 }
6827
6828 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
6829 // Handle the non-qualified case efficiently.
6830 if (!T.hasLocalQualifiers()) {
6831 // Handle the common positive case fast.
6832 if (const auto *AT = dyn_cast<ArrayType>(T))
6833 return AT;
6834 }
6835
6836 // Handle the common negative case fast.
6837 if (!isa<ArrayType>(T.getCanonicalType()))
6838 return nullptr;
6839
6840 // Apply any qualifiers from the array type to the element type. This
6841 // implements C99 6.7.3p8: "If the specification of an array type includes
6842 // any type qualifiers, the element type is so qualified, not the array type."
6843
6844 // If we get here, we either have type qualifiers on the type, or we have
6845 // sugar such as a typedef in the way. If we have type qualifiers on the type
6846 // we must propagate them down into the element type.
6847
6848 SplitQualType split = T.getSplitDesugaredType();
6849 Qualifiers qs = split.Quals;
6850
6851 // If we have a simple case, just return now.
6852 const auto *ATy = dyn_cast<ArrayType>(split.Ty);
6853 if (!ATy || qs.empty())
6854 return ATy;
6855
6856 // Otherwise, we have an array and we have qualifiers on it. Push the
6857 // qualifiers into the array element type and return a new array type.
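// Illustrative sketch (hypothetical types): given 'typedef int A[5];',
// the type 'const A' desugars to an array carrying a local 'const', so
// the result below is 'const int[5]' -- the qualifier ends up on the
// element type, per C99 6.7.3p8.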
6858 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6859 6860 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6861 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6862 CAT->getSizeExpr(), 6863 CAT->getSizeModifier(), 6864 CAT->getIndexTypeCVRQualifiers())); 6865 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6866 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6867 IAT->getSizeModifier(), 6868 IAT->getIndexTypeCVRQualifiers())); 6869 6870 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6871 return cast<ArrayType>( 6872 getDependentSizedArrayType(NewEltTy, 6873 DSAT->getSizeExpr(), 6874 DSAT->getSizeModifier(), 6875 DSAT->getIndexTypeCVRQualifiers(), 6876 DSAT->getBracketsRange())); 6877 6878 const auto *VAT = cast<VariableArrayType>(ATy); 6879 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6880 VAT->getSizeExpr(), 6881 VAT->getSizeModifier(), 6882 VAT->getIndexTypeCVRQualifiers(), 6883 VAT->getBracketsRange())); 6884 } 6885 6886 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6887 if (T->isArrayType() || T->isFunctionType()) 6888 return getDecayedType(T); 6889 return T; 6890 } 6891 6892 QualType ASTContext::getSignatureParameterType(QualType T) const { 6893 T = getVariableArrayDecayedType(T); 6894 T = getAdjustedParameterType(T); 6895 return T.getUnqualifiedType(); 6896 } 6897 6898 QualType ASTContext::getExceptionObjectType(QualType T) const { 6899 // C++ [except.throw]p3: 6900 // A throw-expression initializes a temporary object, called the exception 6901 // object, the type of which is determined by removing any top-level 6902 // cv-qualifiers from the static type of the operand of throw and adjusting 6903 // the type from "array of T" or "function returning T" to "pointer to T" 6904 // or "pointer to function returning T", [...] 6905 T = getVariableArrayDecayedType(T); 6906 if (T->isArrayType() || T->isFunctionType()) 6907 T = getDecayedType(T); 6908 return T.getUnqualifiedType(); 6909 } 6910 6911 /// getArrayDecayedType - Return the properly qualified result of decaying the 6912 /// specified array type to a pointer. This operation is non-trivial when 6913 /// handling typedefs etc. The canonical type of "T" must be an array type, 6914 /// this returns a pointer to a properly qualified element of the array. 6915 /// 6916 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6917 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6918 // Get the element type with 'getAsArrayType' so that we don't lose any 6919 // typedefs in the element type of the array. This also handles propagation 6920 // of type qualifiers from the array type into the element type if present 6921 // (C99 6.7.3p8). 
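// Illustrative sketch (hypothetical types): for
//   typedef int A[5]; const A x;
// the type of 'x' decays through 'const int[5]' to 'const int *', and
// 'int y[restrict 4]' decays to 'int *restrict' as noted below.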
6922 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6923 assert(PrettyArrayType && "Not an array type!"); 6924 6925 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6926 6927 // int x[restrict 4] -> int *restrict 6928 QualType Result = getQualifiedType(PtrTy, 6929 PrettyArrayType->getIndexTypeQualifiers()); 6930 6931 // int x[_Nullable] -> int * _Nullable 6932 if (auto Nullability = Ty->getNullability()) { 6933 Result = const_cast<ASTContext *>(this)->getAttributedType( 6934 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6935 } 6936 return Result; 6937 } 6938 6939 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6940 return getBaseElementType(array->getElementType()); 6941 } 6942 6943 QualType ASTContext::getBaseElementType(QualType type) const { 6944 Qualifiers qs; 6945 while (true) { 6946 SplitQualType split = type.getSplitDesugaredType(); 6947 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6948 if (!array) break; 6949 6950 type = array->getElementType(); 6951 qs.addConsistentQualifiers(split.Quals); 6952 } 6953 6954 return getQualifiedType(type, qs); 6955 } 6956 6957 /// getConstantArrayElementCount - Returns number of constant array elements. 6958 uint64_t 6959 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6960 uint64_t ElementCount = 1; 6961 do { 6962 ElementCount *= CA->getSize().getZExtValue(); 6963 CA = dyn_cast_or_null<ConstantArrayType>( 6964 CA->getElementType()->getAsArrayTypeUnsafe()); 6965 } while (CA); 6966 return ElementCount; 6967 } 6968 6969 uint64_t ASTContext::getArrayInitLoopExprElementCount( 6970 const ArrayInitLoopExpr *AILE) const { 6971 if (!AILE) 6972 return 0; 6973 6974 uint64_t ElementCount = 1; 6975 6976 do { 6977 ElementCount *= AILE->getArraySize().getZExtValue(); 6978 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr()); 6979 } while (AILE); 6980 6981 return ElementCount; 6982 } 6983 6984 /// getFloatingRank - Return a relative rank for floating point types. 6985 /// This routine will assert if passed a built-in type that isn't a float. 6986 static FloatingRank getFloatingRank(QualType T) { 6987 if (const auto *CT = T->getAs<ComplexType>()) 6988 return getFloatingRank(CT->getElementType()); 6989 6990 switch (T->castAs<BuiltinType>()->getKind()) { 6991 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6992 case BuiltinType::Float16: return Float16Rank; 6993 case BuiltinType::Half: return HalfRank; 6994 case BuiltinType::Float: return FloatRank; 6995 case BuiltinType::Double: return DoubleRank; 6996 case BuiltinType::LongDouble: return LongDoubleRank; 6997 case BuiltinType::Float128: return Float128Rank; 6998 case BuiltinType::BFloat16: return BFloat16Rank; 6999 case BuiltinType::Ibm128: return Ibm128Rank; 7000 } 7001 } 7002 7003 /// getFloatingTypeOrder - Compare the rank of the two specified floating 7004 /// point types, ignoring the domain of the type (i.e. 'double' == 7005 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 7006 /// LHS < RHS, return -1. 
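/// Illustrative sketch (hypothetical operands):
/// getFloatingTypeOrder(FloatTy, DoubleTy) is -1,
/// getFloatingTypeOrder(LongDoubleTy, DoubleTy) is 1, and a
/// '_Complex double' operand compares equal to plain 'double' because
/// only the element type's rank is considered.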
7007 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 7008 FloatingRank LHSR = getFloatingRank(LHS); 7009 FloatingRank RHSR = getFloatingRank(RHS); 7010 7011 if (LHSR == RHSR) 7012 return 0; 7013 if (LHSR > RHSR) 7014 return 1; 7015 return -1; 7016 } 7017 7018 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 7019 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 7020 return 0; 7021 return getFloatingTypeOrder(LHS, RHS); 7022 } 7023 7024 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 7025 /// routine will assert if passed a built-in type that isn't an integer or enum, 7026 /// or if it is not canonicalized. 7027 unsigned ASTContext::getIntegerRank(const Type *T) const { 7028 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 7029 7030 // Results in this 'losing' to any type of the same size, but winning if 7031 // larger. 7032 if (const auto *EIT = dyn_cast<BitIntType>(T)) 7033 return 0 + (EIT->getNumBits() << 3); 7034 7035 switch (cast<BuiltinType>(T)->getKind()) { 7036 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 7037 case BuiltinType::Bool: 7038 return 1 + (getIntWidth(BoolTy) << 3); 7039 case BuiltinType::Char_S: 7040 case BuiltinType::Char_U: 7041 case BuiltinType::SChar: 7042 case BuiltinType::UChar: 7043 return 2 + (getIntWidth(CharTy) << 3); 7044 case BuiltinType::Short: 7045 case BuiltinType::UShort: 7046 return 3 + (getIntWidth(ShortTy) << 3); 7047 case BuiltinType::Int: 7048 case BuiltinType::UInt: 7049 return 4 + (getIntWidth(IntTy) << 3); 7050 case BuiltinType::Long: 7051 case BuiltinType::ULong: 7052 return 5 + (getIntWidth(LongTy) << 3); 7053 case BuiltinType::LongLong: 7054 case BuiltinType::ULongLong: 7055 return 6 + (getIntWidth(LongLongTy) << 3); 7056 case BuiltinType::Int128: 7057 case BuiltinType::UInt128: 7058 return 7 + (getIntWidth(Int128Ty) << 3); 7059 7060 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of 7061 // their underlying types" [c++20 conv.rank] 7062 case BuiltinType::Char8: 7063 return getIntegerRank(UnsignedCharTy.getTypePtr()); 7064 case BuiltinType::Char16: 7065 return getIntegerRank( 7066 getFromTargetType(Target->getChar16Type()).getTypePtr()); 7067 case BuiltinType::Char32: 7068 return getIntegerRank( 7069 getFromTargetType(Target->getChar32Type()).getTypePtr()); 7070 case BuiltinType::WChar_S: 7071 case BuiltinType::WChar_U: 7072 return getIntegerRank( 7073 getFromTargetType(Target->getWCharType()).getTypePtr()); 7074 } 7075 } 7076 7077 /// Whether this is a promotable bitfield reference according 7078 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 7079 /// 7080 /// \returns the type this bit-field will promote to, or NULL if no 7081 /// promotion occurs. 7082 QualType ASTContext::isPromotableBitField(Expr *E) const { 7083 if (E->isTypeDependent() || E->isValueDependent()) 7084 return {}; 7085 7086 // C++ [conv.prom]p5: 7087 // If the bit-field has an enumerated type, it is treated as any other 7088 // value of that type for promotion purposes. 7089 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 7090 return {}; 7091 7092 // FIXME: We should not do this unless E->refersToBitField() is true. This 7093 // matters in C where getSourceBitField() will find bit-fields for various 7094 // cases where the source expression is not a bit-field designator. 7095 7096 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 
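// Illustrative sketch (hypothetical fields, 32-bit 'int'): a member
// 'unsigned u : 8' promotes to 'int' because its width is smaller than
// int, while 'unsigned v : 32' promotes to 'unsigned int' because int
// cannot represent all of its values.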
7097 if (!Field) 7098 return {}; 7099 7100 QualType FT = Field->getType(); 7101 7102 uint64_t BitWidth = Field->getBitWidthValue(*this); 7103 uint64_t IntSize = getTypeSize(IntTy); 7104 // C++ [conv.prom]p5: 7105 // A prvalue for an integral bit-field can be converted to a prvalue of type 7106 // int if int can represent all the values of the bit-field; otherwise, it 7107 // can be converted to unsigned int if unsigned int can represent all the 7108 // values of the bit-field. If the bit-field is larger yet, no integral 7109 // promotion applies to it. 7110 // C11 6.3.1.1/2: 7111 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 7112 // If an int can represent all values of the original type (as restricted by 7113 // the width, for a bit-field), the value is converted to an int; otherwise, 7114 // it is converted to an unsigned int. 7115 // 7116 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 7117 // We perform that promotion here to match GCC and C++. 7118 // FIXME: C does not permit promotion of an enum bit-field whose rank is 7119 // greater than that of 'int'. We perform that promotion to match GCC. 7120 if (BitWidth < IntSize) 7121 return IntTy; 7122 7123 if (BitWidth == IntSize) 7124 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 7125 7126 // Bit-fields wider than int are not subject to promotions, and therefore act 7127 // like the base type. GCC has some weird bugs in this area that we 7128 // deliberately do not follow (GCC follows a pre-standard resolution to 7129 // C's DR315 which treats bit-width as being part of the type, and this leaks 7130 // into their semantics in some cases). 7131 return {}; 7132 } 7133 7134 /// getPromotedIntegerType - Returns the type that Promotable will 7135 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 7136 /// integer type. 7137 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7138 assert(!Promotable.isNull()); 7139 assert(isPromotableIntegerType(Promotable)); 7140 if (const auto *ET = Promotable->getAs<EnumType>()) 7141 return ET->getDecl()->getPromotionType(); 7142 7143 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7144 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7145 // (3.9.1) can be converted to a prvalue of the first of the following 7146 // types that can represent all the values of its underlying type: 7147 // int, unsigned int, long int, unsigned long int, long long int, or 7148 // unsigned long long int [...] 7149 // FIXME: Is there some better way to compute this? 7150 if (BT->getKind() == BuiltinType::WChar_S || 7151 BT->getKind() == BuiltinType::WChar_U || 7152 BT->getKind() == BuiltinType::Char8 || 7153 BT->getKind() == BuiltinType::Char16 || 7154 BT->getKind() == BuiltinType::Char32) { 7155 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7156 uint64_t FromSize = getTypeSize(BT); 7157 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7158 LongLongTy, UnsignedLongLongTy }; 7159 for (const auto &PT : PromoteTypes) { 7160 uint64_t ToSize = getTypeSize(PT); 7161 if (FromSize < ToSize || 7162 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) 7163 return PT; 7164 } 7165 llvm_unreachable("char type should fit into long long"); 7166 } 7167 } 7168 7169 // At this point, we should have a signed or unsigned integer type. 
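// Illustrative sketch (hypothetical target): where wchar_t is an
// unsigned 32-bit type and int is 32 bits wide, wchar_t promotes to
// 'unsigned int' -- the first type in the list above able to represent
// all of its values -- while a 16-bit char16_t promotes to 'int'.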
7170 if (Promotable->isSignedIntegerType()) 7171 return IntTy; 7172 uint64_t PromotableSize = getIntWidth(Promotable); 7173 uint64_t IntSize = getIntWidth(IntTy); 7174 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 7175 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 7176 } 7177 7178 /// Recurses in pointer/array types until it finds an objc retainable 7179 /// type and returns its ownership. 7180 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 7181 while (!T.isNull()) { 7182 if (T.getObjCLifetime() != Qualifiers::OCL_None) 7183 return T.getObjCLifetime(); 7184 if (T->isArrayType()) 7185 T = getBaseElementType(T); 7186 else if (const auto *PT = T->getAs<PointerType>()) 7187 T = PT->getPointeeType(); 7188 else if (const auto *RT = T->getAs<ReferenceType>()) 7189 T = RT->getPointeeType(); 7190 else 7191 break; 7192 } 7193 7194 return Qualifiers::OCL_None; 7195 } 7196 7197 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 7198 // Incomplete enum types are not treated as integer types. 7199 // FIXME: In C++, enum types are never integer types. 7200 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 7201 return ET->getDecl()->getIntegerType().getTypePtr(); 7202 return nullptr; 7203 } 7204 7205 /// getIntegerTypeOrder - Returns the highest ranked integer type: 7206 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 7207 /// LHS < RHS, return -1. 7208 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 7209 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 7210 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 7211 7212 // Unwrap enums to their underlying type. 7213 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 7214 LHSC = getIntegerTypeForEnum(ET); 7215 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 7216 RHSC = getIntegerTypeForEnum(ET); 7217 7218 if (LHSC == RHSC) return 0; 7219 7220 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 7221 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 7222 7223 unsigned LHSRank = getIntegerRank(LHSC); 7224 unsigned RHSRank = getIntegerRank(RHSC); 7225 7226 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 7227 if (LHSRank == RHSRank) return 0; 7228 return LHSRank > RHSRank ? 1 : -1; 7229 } 7230 7231 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 7232 if (LHSUnsigned) { 7233 // If the unsigned [LHS] type is larger, return it. 7234 if (LHSRank >= RHSRank) 7235 return 1; 7236 7237 // If the signed type can represent all values of the unsigned type, it 7238 // wins. Because we are dealing with 2's complement and types that are 7239 // powers of two larger than each other, this is always safe. 7240 return -1; 7241 } 7242 7243 // If the unsigned [RHS] type is larger, return it. 7244 if (RHSRank >= LHSRank) 7245 return -1; 7246 7247 // If the signed type can represent all values of the unsigned type, it 7248 // wins. Because we are dealing with 2's complement and types that are 7249 // powers of two larger than each other, this is always safe. 
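// Illustrative sketch (assuming an LP64 target):
// getIntegerTypeOrder(IntTy, UnsignedLongTy) is -1 because the wider
// unsigned type wins, while getIntegerTypeOrder(LongTy, UnsignedIntTy)
// is 1 because 64-bit 'long' can represent every 'unsigned int' value.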
7250 return 1; } 7252
7253 TypedefDecl *ASTContext::getCFConstantStringDecl() const {
7254 if (CFConstantStringTypeDecl)
7255 return CFConstantStringTypeDecl;
7256
7257 assert(!CFConstantStringTagDecl &&
7258 "tag and typedef should be initialized together");
7259 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
7260 CFConstantStringTagDecl->startDefinition();
7261
7262 struct {
7263 QualType Type;
7264 const char *Name;
7265 } Fields[5];
7266 unsigned Count = 0;
7267
7268 /// Objective-C ABI
7269 ///
7270 /// typedef struct __NSConstantString_tag {
7271 /// const int *isa;
7272 /// int flags;
7273 /// const char *str;
7274 /// long length;
7275 /// } __NSConstantString;
7276 ///
7277 /// Swift ABI (4.1, 4.2)
7278 ///
7279 /// typedef struct __NSConstantString_tag {
7280 /// uintptr_t _cfisa;
7281 /// uintptr_t _swift_rc;
7282 /// _Atomic(uint64_t) _cfinfoa;
7283 /// const char *_ptr;
7284 /// uint32_t _length;
7285 /// } __NSConstantString;
7286 ///
7287 /// Swift ABI (5.0)
7288 ///
7289 /// typedef struct __NSConstantString_tag {
7290 /// uintptr_t _cfisa;
7291 /// uintptr_t _swift_rc;
7292 /// _Atomic(uint64_t) _cfinfoa;
7293 /// const char *_ptr;
7294 /// uintptr_t _length;
7295 /// } __NSConstantString;
7296
7297 const auto CFRuntime = getLangOpts().CFRuntime;
7298 if (static_cast<unsigned>(CFRuntime) <
7299 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
7300 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
7301 Fields[Count++] = { IntTy, "flags" };
7302 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
7303 Fields[Count++] = { LongTy, "length" };
7304 } else {
7305 Fields[Count++] = { getUIntPtrType(), "_cfisa" };
7306 Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
7307 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" };
7308 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
7309 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
7310 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
7311 Fields[Count++] = { IntTy, "_length" };
7312 else
7313 Fields[Count++] = { getUIntPtrType(), "_length" };
7314 }
7315
7316 // Create fields
7317 for (unsigned i = 0; i < Count; ++i) {
7318 FieldDecl *Field =
7319 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
7320 SourceLocation(), &Idents.get(Fields[i].Name),
7321 Fields[i].Type, /*TInfo=*/nullptr,
7322 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
7323 Field->setAccess(AS_public);
7324 CFConstantStringTagDecl->addDecl(Field);
7325 }
7326
7327 CFConstantStringTagDecl->completeDefinition();
7328 // This type is designed to be compatible with NSConstantString, but cannot
7329 // use the same name, since NSConstantString is an interface.
7330 auto tagType = getTagDeclType(CFConstantStringTagDecl);
7331 CFConstantStringTypeDecl =
7332 buildImplicitTypedef(tagType, "__NSConstantString");
7333
7334 return CFConstantStringTypeDecl;
7335 }
7336
7337 RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
7338 if (!CFConstantStringTagDecl)
7339 getCFConstantStringDecl(); // Build the tag and the typedef.
7340 return CFConstantStringTagDecl;
7341 }
7342
7343 // getCFConstantStringType - Return the type used for constant CFStrings.
7344 QualType ASTContext::getCFConstantStringType() const { 7345 return getTypedefType(getCFConstantStringDecl()); 7346 } 7347 7348 QualType ASTContext::getObjCSuperType() const { 7349 if (ObjCSuperType.isNull()) { 7350 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7351 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7352 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7353 } 7354 return ObjCSuperType; 7355 } 7356 7357 void ASTContext::setCFConstantStringType(QualType T) { 7358 const auto *TD = T->castAs<TypedefType>(); 7359 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7360 const auto *TagType = 7361 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7362 CFConstantStringTagDecl = TagType->getDecl(); 7363 } 7364 7365 QualType ASTContext::getBlockDescriptorType() const { 7366 if (BlockDescriptorType) 7367 return getTagDeclType(BlockDescriptorType); 7368 7369 RecordDecl *RD; 7370 // FIXME: Needs the FlagAppleBlock bit. 7371 RD = buildImplicitRecord("__block_descriptor"); 7372 RD->startDefinition(); 7373 7374 QualType FieldTypes[] = { 7375 UnsignedLongTy, 7376 UnsignedLongTy, 7377 }; 7378 7379 static const char *const FieldNames[] = { 7380 "reserved", 7381 "Size" 7382 }; 7383 7384 for (size_t i = 0; i < 2; ++i) { 7385 FieldDecl *Field = FieldDecl::Create( 7386 *this, RD, SourceLocation(), SourceLocation(), 7387 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7388 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7389 Field->setAccess(AS_public); 7390 RD->addDecl(Field); 7391 } 7392 7393 RD->completeDefinition(); 7394 7395 BlockDescriptorType = RD; 7396 7397 return getTagDeclType(BlockDescriptorType); 7398 } 7399 7400 QualType ASTContext::getBlockDescriptorExtendedType() const { 7401 if (BlockDescriptorExtendedType) 7402 return getTagDeclType(BlockDescriptorExtendedType); 7403 7404 RecordDecl *RD; 7405 // FIXME: Needs the FlagAppleBlock bit. 
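// The record built below corresponds to the following layout
// (illustrative only):
//   struct __block_descriptor_withcopydispose {
//     unsigned long reserved;
//     unsigned long Size;
//     void **CopyFuncPtr;
//     void **DestroyFuncPtr;
//   };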
7406 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7407 RD->startDefinition(); 7408 7409 QualType FieldTypes[] = { 7410 UnsignedLongTy, 7411 UnsignedLongTy, 7412 getPointerType(VoidPtrTy), 7413 getPointerType(VoidPtrTy) 7414 }; 7415 7416 static const char *const FieldNames[] = { 7417 "reserved", 7418 "Size", 7419 "CopyFuncPtr", 7420 "DestroyFuncPtr" 7421 }; 7422 7423 for (size_t i = 0; i < 4; ++i) { 7424 FieldDecl *Field = FieldDecl::Create( 7425 *this, RD, SourceLocation(), SourceLocation(), 7426 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7427 /*BitWidth=*/nullptr, 7428 /*Mutable=*/false, ICIS_NoInit); 7429 Field->setAccess(AS_public); 7430 RD->addDecl(Field); 7431 } 7432 7433 RD->completeDefinition(); 7434 7435 BlockDescriptorExtendedType = RD; 7436 return getTagDeclType(BlockDescriptorExtendedType); 7437 } 7438 7439 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7440 const auto *BT = dyn_cast<BuiltinType>(T); 7441 7442 if (!BT) { 7443 if (isa<PipeType>(T)) 7444 return OCLTK_Pipe; 7445 7446 return OCLTK_Default; 7447 } 7448 7449 switch (BT->getKind()) { 7450 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7451 case BuiltinType::Id: \ 7452 return OCLTK_Image; 7453 #include "clang/Basic/OpenCLImageTypes.def" 7454 7455 case BuiltinType::OCLClkEvent: 7456 return OCLTK_ClkEvent; 7457 7458 case BuiltinType::OCLEvent: 7459 return OCLTK_Event; 7460 7461 case BuiltinType::OCLQueue: 7462 return OCLTK_Queue; 7463 7464 case BuiltinType::OCLReserveID: 7465 return OCLTK_ReserveID; 7466 7467 case BuiltinType::OCLSampler: 7468 return OCLTK_Sampler; 7469 7470 default: 7471 return OCLTK_Default; 7472 } 7473 } 7474 7475 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7476 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7477 } 7478 7479 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7480 /// requires copy/dispose. Note that this must match the logic 7481 /// in buildByrefHelpers. 7482 bool ASTContext::BlockRequiresCopying(QualType Ty, 7483 const VarDecl *D) { 7484 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7485 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7486 if (!copyExpr && record->hasTrivialDestructor()) return false; 7487 7488 return true; 7489 } 7490 7491 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7492 // move or destroy. 7493 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7494 return true; 7495 7496 if (!Ty->isObjCRetainableType()) return false; 7497 7498 Qualifiers qs = Ty.getQualifiers(); 7499 7500 // If we have lifetime, that dominates. 7501 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7502 switch (lifetime) { 7503 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7504 7505 // These are just bits as far as the runtime is concerned. 7506 case Qualifiers::OCL_ExplicitNone: 7507 case Qualifiers::OCL_Autoreleasing: 7508 return false; 7509 7510 // These cases should have been taken care of when checking the type's 7511 // non-triviality. 
7512 case Qualifiers::OCL_Weak: 7513 case Qualifiers::OCL_Strong: 7514 llvm_unreachable("impossible"); 7515 } 7516 llvm_unreachable("fell out of lifetime switch!"); 7517 } 7518 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7519 Ty->isObjCObjectPointerType()); 7520 } 7521 7522 bool ASTContext::getByrefLifetime(QualType Ty, 7523 Qualifiers::ObjCLifetime &LifeTime, 7524 bool &HasByrefExtendedLayout) const { 7525 if (!getLangOpts().ObjC || 7526 getLangOpts().getGC() != LangOptions::NonGC) 7527 return false; 7528 7529 HasByrefExtendedLayout = false; 7530 if (Ty->isRecordType()) { 7531 HasByrefExtendedLayout = true; 7532 LifeTime = Qualifiers::OCL_None; 7533 } else if ((LifeTime = Ty.getObjCLifetime())) { 7534 // Honor the ARC qualifiers. 7535 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7536 // The MRR rule. 7537 LifeTime = Qualifiers::OCL_ExplicitNone; 7538 } else { 7539 LifeTime = Qualifiers::OCL_None; 7540 } 7541 return true; 7542 } 7543 7544 CanQualType ASTContext::getNSUIntegerType() const { 7545 assert(Target && "Expected target to be initialized"); 7546 const llvm::Triple &T = Target->getTriple(); 7547 // Windows is LLP64 rather than LP64 7548 if (T.isOSWindows() && T.isArch64Bit()) 7549 return UnsignedLongLongTy; 7550 return UnsignedLongTy; 7551 } 7552 7553 CanQualType ASTContext::getNSIntegerType() const { 7554 assert(Target && "Expected target to be initialized"); 7555 const llvm::Triple &T = Target->getTriple(); 7556 // Windows is LLP64 rather than LP64 7557 if (T.isOSWindows() && T.isArch64Bit()) 7558 return LongLongTy; 7559 return LongTy; 7560 } 7561 7562 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7563 if (!ObjCInstanceTypeDecl) 7564 ObjCInstanceTypeDecl = 7565 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7566 return ObjCInstanceTypeDecl; 7567 } 7568 7569 // This returns true if a type has been typedefed to BOOL: 7570 // typedef <type> BOOL; 7571 static bool isTypeTypedefedAsBOOL(QualType T) { 7572 if (const auto *TT = dyn_cast<TypedefType>(T)) 7573 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7574 return II->isStr("BOOL"); 7575 7576 return false; 7577 } 7578 7579 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7580 /// purpose. 7581 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7582 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7583 return CharUnits::Zero(); 7584 7585 CharUnits sz = getTypeSizeInChars(type); 7586 7587 // Make all integer and enum types at least as large as an int 7588 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7589 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7590 // Treat arrays as pointers, since that's how they're passed in. 7591 else if (type->isArrayType()) 7592 sz = getTypeSizeInChars(VoidPtrTy); 7593 return sz; 7594 } 7595 7596 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7597 return getTargetInfo().getCXXABI().isMicrosoft() && 7598 VD->isStaticDataMember() && 7599 VD->getType()->isIntegralOrEnumerationType() && 7600 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7601 } 7602 7603 ASTContext::InlineVariableDefinitionKind 7604 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7605 if (!VD->isInline()) 7606 return InlineVariableDefinitionKind::None; 7607 7608 // In almost all cases, it's a weak definition. 
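// Illustrative sketch (hypothetical declarations): a namespace-scope
// 'inline int x;' is a Weak definition, while for
//   struct A { static constexpr int k = 0; };
//   constexpr int A::k; // at file scope
// the out-of-line declaration makes the definition Strong.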
7609 auto *First = VD->getFirstDecl(); 7610 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7611 return InlineVariableDefinitionKind::Weak; 7612 7613 // If there's a file-context declaration in this translation unit, it's a 7614 // non-discardable definition. 7615 for (auto *D : VD->redecls()) 7616 if (D->getLexicalDeclContext()->isFileContext() && 7617 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7618 return InlineVariableDefinitionKind::Strong; 7619 7620 // If we've not seen one yet, we don't know. 7621 return InlineVariableDefinitionKind::WeakUnknown; 7622 } 7623 7624 static std::string charUnitsToString(const CharUnits &CU) { 7625 return llvm::itostr(CU.getQuantity()); 7626 } 7627 7628 /// getObjCEncodingForBlock - Return the encoded type for this block 7629 /// declaration. 7630 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7631 std::string S; 7632 7633 const BlockDecl *Decl = Expr->getBlockDecl(); 7634 QualType BlockTy = 7635 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7636 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7637 // Encode result type. 7638 if (getLangOpts().EncodeExtendedBlockSig) 7639 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7640 true /*Extended*/); 7641 else 7642 getObjCEncodingForType(BlockReturnTy, S); 7643 // Compute size of all parameters. 7644 // Start with computing size of a pointer in number of bytes. 7645 // FIXME: There might(should) be a better way of doing this computation! 7646 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7647 CharUnits ParmOffset = PtrSize; 7648 for (auto *PI : Decl->parameters()) { 7649 QualType PType = PI->getType(); 7650 CharUnits sz = getObjCEncodingTypeSize(PType); 7651 if (sz.isZero()) 7652 continue; 7653 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7654 ParmOffset += sz; 7655 } 7656 // Size of the argument frame 7657 S += charUnitsToString(ParmOffset); 7658 // Block pointer and offset. 7659 S += "@?0"; 7660 7661 // Argument types. 7662 ParmOffset = PtrSize; 7663 for (auto *PVDecl : Decl->parameters()) { 7664 QualType PType = PVDecl->getOriginalType(); 7665 if (const auto *AT = 7666 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7667 // Use array's original type only if it has known number of 7668 // elements. 7669 if (!isa<ConstantArrayType>(AT)) 7670 PType = PVDecl->getType(); 7671 } else if (PType->isFunctionType()) 7672 PType = PVDecl->getType(); 7673 if (getLangOpts().EncodeExtendedBlockSig) 7674 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7675 S, true /*Extended*/); 7676 else 7677 getObjCEncodingForType(PType, S); 7678 S += charUnitsToString(ParmOffset); 7679 ParmOffset += getObjCEncodingTypeSize(PType); 7680 } 7681 7682 return S; 7683 } 7684 7685 std::string 7686 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7687 std::string S; 7688 // Encode result type. 7689 getObjCEncodingForType(Decl->getReturnType(), S); 7690 CharUnits ParmOffset; 7691 // Compute size of all parameters. 7692 for (auto *PI : Decl->parameters()) { 7693 QualType PType = PI->getType(); 7694 CharUnits sz = getObjCEncodingTypeSize(PType); 7695 if (sz.isZero()) 7696 continue; 7697 7698 assert(sz.isPositive() && 7699 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7700 ParmOffset += sz; 7701 } 7702 S += charUnitsToString(ParmOffset); 7703 ParmOffset = CharUnits::Zero(); 7704 7705 // Argument types. 
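  // For example, 'void f(int, char *)' is encoded roughly as "v12i0*4" on a
  // typical 64-bit target: return type, total byte size of the arguments,
  // then each argument's encoding followed by its byte offset (illustrative
  // sketch; the exact string depends on the target's type sizes).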
7706 for (auto *PVDecl : Decl->parameters()) { 7707 QualType PType = PVDecl->getOriginalType(); 7708 if (const auto *AT = 7709 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7710 // Use array's original type only if it has known number of 7711 // elements. 7712 if (!isa<ConstantArrayType>(AT)) 7713 PType = PVDecl->getType(); 7714 } else if (PType->isFunctionType()) 7715 PType = PVDecl->getType(); 7716 getObjCEncodingForType(PType, S); 7717 S += charUnitsToString(ParmOffset); 7718 ParmOffset += getObjCEncodingTypeSize(PType); 7719 } 7720 7721 return S; 7722 } 7723 7724 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7725 /// method parameter or return type. If Extended, include class names and 7726 /// block object types. 7727 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7728 QualType T, std::string& S, 7729 bool Extended) const { 7730 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7731 getObjCEncodingForTypeQualifier(QT, S); 7732 // Encode parameter type. 7733 ObjCEncOptions Options = ObjCEncOptions() 7734 .setExpandPointedToStructures() 7735 .setExpandStructures() 7736 .setIsOutermostType(); 7737 if (Extended) 7738 Options.setEncodeBlockParameters().setEncodeClassNames(); 7739 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7740 } 7741 7742 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7743 /// declaration. 7744 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7745 bool Extended) const { 7746 // FIXME: This is not very efficient. 7747 // Encode return type. 7748 std::string S; 7749 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7750 Decl->getReturnType(), S, Extended); 7751 // Compute size of all parameters. 7752 // Start with computing size of a pointer in number of bytes. 7753 // FIXME: There might(should) be a better way of doing this computation! 7754 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7755 // The first two arguments (self and _cmd) are pointers; account for 7756 // their size. 7757 CharUnits ParmOffset = 2 * PtrSize; 7758 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7759 E = Decl->sel_param_end(); PI != E; ++PI) { 7760 QualType PType = (*PI)->getType(); 7761 CharUnits sz = getObjCEncodingTypeSize(PType); 7762 if (sz.isZero()) 7763 continue; 7764 7765 assert(sz.isPositive() && 7766 "getObjCEncodingForMethodDecl - Incomplete param type"); 7767 ParmOffset += sz; 7768 } 7769 S += charUnitsToString(ParmOffset); 7770 S += "@0:"; 7771 S += charUnitsToString(PtrSize); 7772 7773 // Argument types. 7774 ParmOffset = 2 * PtrSize; 7775 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7776 E = Decl->sel_param_end(); PI != E; ++PI) { 7777 const ParmVarDecl *PVDecl = *PI; 7778 QualType PType = PVDecl->getOriginalType(); 7779 if (const auto *AT = 7780 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7781 // Use array's original type only if it has known number of 7782 // elements. 
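      // (e.g. a parameter declared 'int a[4]' keeps its array type here,
      //  while 'int a[]' or 'int a[n]' falls back to the decayed pointer type.)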
7783 if (!isa<ConstantArrayType>(AT)) 7784 PType = PVDecl->getType(); 7785 } else if (PType->isFunctionType()) 7786 PType = PVDecl->getType(); 7787 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7788 PType, S, Extended); 7789 S += charUnitsToString(ParmOffset); 7790 ParmOffset += getObjCEncodingTypeSize(PType); 7791 } 7792 7793 return S; 7794 } 7795 7796 ObjCPropertyImplDecl * 7797 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7798 const ObjCPropertyDecl *PD, 7799 const Decl *Container) const { 7800 if (!Container) 7801 return nullptr; 7802 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7803 for (auto *PID : CID->property_impls()) 7804 if (PID->getPropertyDecl() == PD) 7805 return PID; 7806 } else { 7807 const auto *OID = cast<ObjCImplementationDecl>(Container); 7808 for (auto *PID : OID->property_impls()) 7809 if (PID->getPropertyDecl() == PD) 7810 return PID; 7811 } 7812 return nullptr; 7813 } 7814 7815 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7816 /// property declaration. If non-NULL, Container must be either an 7817 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7818 /// NULL when getting encodings for protocol properties. 7819 /// Property attributes are stored as a comma-delimited C string. The simple 7820 /// attributes readonly and bycopy are encoded as single characters. The 7821 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7822 /// encoded as single characters, followed by an identifier. Property types 7823 /// are also encoded as a parametrized attribute. The characters used to encode 7824 /// these attributes are defined by the following enumeration: 7825 /// @code 7826 /// enum PropertyAttributes { 7827 /// kPropertyReadOnly = 'R', // property is read-only. 7828 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7829 /// kPropertyByref = '&', // property is a reference to the value last assigned 7830 /// kPropertyDynamic = 'D', // property is dynamic 7831 /// kPropertyGetter = 'G', // followed by getter selector name 7832 /// kPropertySetter = 'S', // followed by setter selector name 7833 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7834 /// kPropertyType = 'T' // followed by old-style type encoding. 7835 /// kPropertyWeak = 'W' // 'weak' property 7836 /// kPropertyStrong = 'P' // property GC'able 7837 /// kPropertyNonAtomic = 'N' // property non-atomic 7838 /// kPropertyOptional = '?' // property optional 7839 /// }; 7840 /// @endcode 7841 std::string 7842 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7843 const Decl *Container) const { 7844 // Collect information from the property implementation decl(s). 7845 bool Dynamic = false; 7846 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7847 7848 if (ObjCPropertyImplDecl *PropertyImpDecl = 7849 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7850 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7851 Dynamic = true; 7852 else 7853 SynthesizePID = PropertyImpDecl; 7854 } 7855 7856 // FIXME: This is not very efficient. 7857 std::string S = "T"; 7858 7859 // Encode result type. 7860 // GCC has some special rules regarding encoding of properties which 7861 // closely resembles encoding of ivars. 
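  // As a rough illustration, a property declared
  //   @property (nonatomic, copy) NSString *name;
  // and synthesized onto the ivar '_name' ends up encoded as something like
  //   T@"NSString",C,N,V_name
  // (the exact string depends on the type encoding produced below).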
7862 getObjCEncodingForPropertyType(PD->getType(), S); 7863 7864 if (PD->isOptional()) 7865 S += ",?"; 7866 7867 if (PD->isReadOnly()) { 7868 S += ",R"; 7869 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7870 S += ",C"; 7871 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7872 S += ",&"; 7873 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7874 S += ",W"; 7875 } else { 7876 switch (PD->getSetterKind()) { 7877 case ObjCPropertyDecl::Assign: break; 7878 case ObjCPropertyDecl::Copy: S += ",C"; break; 7879 case ObjCPropertyDecl::Retain: S += ",&"; break; 7880 case ObjCPropertyDecl::Weak: S += ",W"; break; 7881 } 7882 } 7883 7884 // It really isn't clear at all what this means, since properties 7885 // are "dynamic by default". 7886 if (Dynamic) 7887 S += ",D"; 7888 7889 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7890 S += ",N"; 7891 7892 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7893 S += ",G"; 7894 S += PD->getGetterName().getAsString(); 7895 } 7896 7897 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7898 S += ",S"; 7899 S += PD->getSetterName().getAsString(); 7900 } 7901 7902 if (SynthesizePID) { 7903 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7904 S += ",V"; 7905 S += OID->getNameAsString(); 7906 } 7907 7908 // FIXME: OBJCGC: weak & strong 7909 return S; 7910 } 7911 7912 /// getLegacyIntegralTypeEncoding - 7913 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7914 /// 'l' or 'L' , but not always. For typedefs, we need to use 7915 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7916 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7917 if (PointeeTy->getAs<TypedefType>()) { 7918 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7919 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7920 PointeeTy = UnsignedIntTy; 7921 else 7922 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7923 PointeeTy = IntTy; 7924 } 7925 } 7926 } 7927 7928 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7929 const FieldDecl *Field, 7930 QualType *NotEncodedT) const { 7931 // We follow the behavior of gcc, expanding structures which are 7932 // directly pointed to, and expanding embedded structures. Note that 7933 // these rules are sufficient to prevent recursive encoding of the 7934 // same type. 7935 getObjCEncodingForTypeImpl(T, S, 7936 ObjCEncOptions() 7937 .setExpandPointedToStructures() 7938 .setExpandStructures() 7939 .setIsOutermostType(), 7940 Field, NotEncodedT); 7941 } 7942 7943 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7944 std::string& S) const { 7945 // Encode result type. 7946 // GCC has some special rules regarding encoding of properties which 7947 // closely resembles encoding of ivars. 
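  // (This helper emits only the bare type encoding, i.e. the part that
  //  follows the leading 'T' in the attribute string assembled by
  //  getObjCEncodingForPropertyDecl above.)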
7948 getObjCEncodingForTypeImpl(T, S, 7949 ObjCEncOptions() 7950 .setExpandPointedToStructures() 7951 .setExpandStructures() 7952 .setIsOutermostType() 7953 .setEncodingProperty(), 7954 /*Field=*/nullptr); 7955 } 7956 7957 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7958 const BuiltinType *BT) { 7959 BuiltinType::Kind kind = BT->getKind(); 7960 switch (kind) { 7961 case BuiltinType::Void: return 'v'; 7962 case BuiltinType::Bool: return 'B'; 7963 case BuiltinType::Char8: 7964 case BuiltinType::Char_U: 7965 case BuiltinType::UChar: return 'C'; 7966 case BuiltinType::Char16: 7967 case BuiltinType::UShort: return 'S'; 7968 case BuiltinType::Char32: 7969 case BuiltinType::UInt: return 'I'; 7970 case BuiltinType::ULong: 7971 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7972 case BuiltinType::UInt128: return 'T'; 7973 case BuiltinType::ULongLong: return 'Q'; 7974 case BuiltinType::Char_S: 7975 case BuiltinType::SChar: return 'c'; 7976 case BuiltinType::Short: return 's'; 7977 case BuiltinType::WChar_S: 7978 case BuiltinType::WChar_U: 7979 case BuiltinType::Int: return 'i'; 7980 case BuiltinType::Long: 7981 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7982 case BuiltinType::LongLong: return 'q'; 7983 case BuiltinType::Int128: return 't'; 7984 case BuiltinType::Float: return 'f'; 7985 case BuiltinType::Double: return 'd'; 7986 case BuiltinType::LongDouble: return 'D'; 7987 case BuiltinType::NullPtr: return '*'; // like char* 7988 7989 case BuiltinType::BFloat16: 7990 case BuiltinType::Float16: 7991 case BuiltinType::Float128: 7992 case BuiltinType::Ibm128: 7993 case BuiltinType::Half: 7994 case BuiltinType::ShortAccum: 7995 case BuiltinType::Accum: 7996 case BuiltinType::LongAccum: 7997 case BuiltinType::UShortAccum: 7998 case BuiltinType::UAccum: 7999 case BuiltinType::ULongAccum: 8000 case BuiltinType::ShortFract: 8001 case BuiltinType::Fract: 8002 case BuiltinType::LongFract: 8003 case BuiltinType::UShortFract: 8004 case BuiltinType::UFract: 8005 case BuiltinType::ULongFract: 8006 case BuiltinType::SatShortAccum: 8007 case BuiltinType::SatAccum: 8008 case BuiltinType::SatLongAccum: 8009 case BuiltinType::SatUShortAccum: 8010 case BuiltinType::SatUAccum: 8011 case BuiltinType::SatULongAccum: 8012 case BuiltinType::SatShortFract: 8013 case BuiltinType::SatFract: 8014 case BuiltinType::SatLongFract: 8015 case BuiltinType::SatUShortFract: 8016 case BuiltinType::SatUFract: 8017 case BuiltinType::SatULongFract: 8018 // FIXME: potentially need @encodes for these! 8019 return ' '; 8020 8021 #define SVE_TYPE(Name, Id, SingletonId) \ 8022 case BuiltinType::Id: 8023 #include "clang/Basic/AArch64SVEACLETypes.def" 8024 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8025 #include "clang/Basic/RISCVVTypes.def" 8026 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8027 #include "clang/Basic/WebAssemblyReferenceTypes.def" 8028 { 8029 DiagnosticsEngine &Diags = C->getDiagnostics(); 8030 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 8031 "cannot yet @encode type %0"); 8032 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 8033 return ' '; 8034 } 8035 8036 case BuiltinType::ObjCId: 8037 case BuiltinType::ObjCClass: 8038 case BuiltinType::ObjCSel: 8039 llvm_unreachable("@encoding ObjC primitive type"); 8040 8041 // OpenCL and placeholder types don't need @encodings. 
8042 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 8043 case BuiltinType::Id: 8044 #include "clang/Basic/OpenCLImageTypes.def" 8045 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 8046 case BuiltinType::Id: 8047 #include "clang/Basic/OpenCLExtensionTypes.def" 8048 case BuiltinType::OCLEvent: 8049 case BuiltinType::OCLClkEvent: 8050 case BuiltinType::OCLQueue: 8051 case BuiltinType::OCLReserveID: 8052 case BuiltinType::OCLSampler: 8053 case BuiltinType::Dependent: 8054 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 8055 case BuiltinType::Id: 8056 #include "clang/Basic/PPCTypes.def" 8057 #define BUILTIN_TYPE(KIND, ID) 8058 #define PLACEHOLDER_TYPE(KIND, ID) \ 8059 case BuiltinType::KIND: 8060 #include "clang/AST/BuiltinTypes.def" 8061 llvm_unreachable("invalid builtin type for @encode"); 8062 } 8063 llvm_unreachable("invalid BuiltinType::Kind value"); 8064 } 8065 8066 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 8067 EnumDecl *Enum = ET->getDecl(); 8068 8069 // The encoding of an non-fixed enum type is always 'i', regardless of size. 8070 if (!Enum->isFixed()) 8071 return 'i'; 8072 8073 // The encoding of a fixed enum type matches its fixed underlying type. 8074 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 8075 return getObjCEncodingForPrimitiveType(C, BT); 8076 } 8077 8078 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 8079 QualType T, const FieldDecl *FD) { 8080 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 8081 S += 'b'; 8082 // The NeXT runtime encodes bit fields as b followed by the number of bits. 8083 // The GNU runtime requires more information; bitfields are encoded as b, 8084 // then the offset (in bits) of the first element, then the type of the 8085 // bitfield, then the size in bits. For example, in this structure: 8086 // 8087 // struct 8088 // { 8089 // int integer; 8090 // int flags:2; 8091 // }; 8092 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 8093 // runtime, but b32i2 for the GNU runtime. The reason for this extra 8094 // information is not especially sensible, but we're stuck with it for 8095 // compatibility with GCC, although providing it breaks anything that 8096 // actually uses runtime introspection and wants to work on both runtimes... 8097 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 8098 uint64_t Offset; 8099 8100 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8101 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8102 IVD); 8103 } else { 8104 const RecordDecl *RD = FD->getParent(); 8105 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8106 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8107 } 8108 8109 S += llvm::utostr(Offset); 8110 8111 if (const auto *ET = T->getAs<EnumType>()) 8112 S += ObjCEncodingForEnumType(Ctx, ET); 8113 else { 8114 const auto *BT = T->castAs<BuiltinType>(); 8115 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8116 } 8117 } 8118 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8119 } 8120 8121 // Helper function for determining whether the encoded type string would include 8122 // a template specialization type. 
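// For example, given
//   struct S { std::vector<int> V; };
// the encoding of 'S' would mention the vector<int> specialization, so (unless
// LangOpts.EncodeCXXClassTemplateSpec is set) a pointer to 'S' is encoded as
// just "^v" by the caller below.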
8123 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8124 bool VisitBasesAndFields) { 8125 T = T->getBaseElementTypeUnsafe(); 8126 8127 if (auto *PT = T->getAs<PointerType>()) 8128 return hasTemplateSpecializationInEncodedString( 8129 PT->getPointeeType().getTypePtr(), false); 8130 8131 auto *CXXRD = T->getAsCXXRecordDecl(); 8132 8133 if (!CXXRD) 8134 return false; 8135 8136 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8137 return true; 8138 8139 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8140 return false; 8141 8142 for (const auto &B : CXXRD->bases()) 8143 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8144 true)) 8145 return true; 8146 8147 for (auto *FD : CXXRD->fields()) 8148 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8149 true)) 8150 return true; 8151 8152 return false; 8153 } 8154 8155 // FIXME: Use SmallString for accumulating string. 8156 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8157 const ObjCEncOptions Options, 8158 const FieldDecl *FD, 8159 QualType *NotEncodedT) const { 8160 CanQualType CT = getCanonicalType(T); 8161 switch (CT->getTypeClass()) { 8162 case Type::Builtin: 8163 case Type::Enum: 8164 if (FD && FD->isBitField()) 8165 return EncodeBitField(this, S, T, FD); 8166 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8167 S += getObjCEncodingForPrimitiveType(this, BT); 8168 else 8169 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8170 return; 8171 8172 case Type::Complex: 8173 S += 'j'; 8174 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8175 ObjCEncOptions(), 8176 /*Field=*/nullptr); 8177 return; 8178 8179 case Type::Atomic: 8180 S += 'A'; 8181 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8182 ObjCEncOptions(), 8183 /*Field=*/nullptr); 8184 return; 8185 8186 // encoding for pointer or reference types. 8187 case Type::Pointer: 8188 case Type::LValueReference: 8189 case Type::RValueReference: { 8190 QualType PointeeTy; 8191 if (isa<PointerType>(CT)) { 8192 const auto *PT = T->castAs<PointerType>(); 8193 if (PT->isObjCSelType()) { 8194 S += ':'; 8195 return; 8196 } 8197 PointeeTy = PT->getPointeeType(); 8198 } else { 8199 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8200 } 8201 8202 bool isReadOnly = false; 8203 // For historical/compatibility reasons, the read-only qualifier of the 8204 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8205 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8206 // Also, do not emit the 'r' for anything but the outermost type! 8207 if (T->getAs<TypedefType>()) { 8208 if (Options.IsOutermostType() && T.isConstQualified()) { 8209 isReadOnly = true; 8210 S += 'r'; 8211 } 8212 } else if (Options.IsOutermostType()) { 8213 QualType P = PointeeTy; 8214 while (auto PT = P->getAs<PointerType>()) 8215 P = PT->getPointeeType(); 8216 if (P.isConstQualified()) { 8217 isReadOnly = true; 8218 S += 'r'; 8219 } 8220 } 8221 if (isReadOnly) { 8222 // Another legacy compatibility encoding. Some ObjC qualifier and type 8223 // combinations need to be rearranged. 8224 // Rewrite "in const" from "nr" to "rn" 8225 if (StringRef(S).ends_with("nr")) 8226 S.replace(S.end()-2, S.end(), "rn"); 8227 } 8228 8229 if (PointeeTy->isCharType()) { 8230 // char pointer types should be encoded as '*' unless it is a 8231 // type that has been typedef'd to 'BOOL'. 
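      // (e.g. @encode(char *) yields "*", while @encode(BOOL *) yields "^c"
      //  on targets where BOOL is a typedef for signed char.)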
8232 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8233 S += '*'; 8234 return; 8235 } 8236 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8237 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8238 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8239 S += '#'; 8240 return; 8241 } 8242 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8243 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8244 S += '@'; 8245 return; 8246 } 8247 // If the encoded string for the class includes template names, just emit 8248 // "^v" for pointers to the class. 8249 if (getLangOpts().CPlusPlus && 8250 (!getLangOpts().EncodeCXXClassTemplateSpec && 8251 hasTemplateSpecializationInEncodedString( 8252 RTy, Options.ExpandPointedToStructures()))) { 8253 S += "^v"; 8254 return; 8255 } 8256 // fall through... 8257 } 8258 S += '^'; 8259 getLegacyIntegralTypeEncoding(PointeeTy); 8260 8261 ObjCEncOptions NewOptions; 8262 if (Options.ExpandPointedToStructures()) 8263 NewOptions.setExpandStructures(); 8264 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8265 /*Field=*/nullptr, NotEncodedT); 8266 return; 8267 } 8268 8269 case Type::ConstantArray: 8270 case Type::IncompleteArray: 8271 case Type::VariableArray: { 8272 const auto *AT = cast<ArrayType>(CT); 8273 8274 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8275 // Incomplete arrays are encoded as a pointer to the array element. 8276 S += '^'; 8277 8278 getObjCEncodingForTypeImpl( 8279 AT->getElementType(), S, 8280 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8281 } else { 8282 S += '['; 8283 8284 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8285 S += llvm::utostr(CAT->getSize().getZExtValue()); 8286 else { 8287 //Variable length arrays are encoded as a regular array with 0 elements. 8288 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8289 "Unknown array type!"); 8290 S += '0'; 8291 } 8292 8293 getObjCEncodingForTypeImpl( 8294 AT->getElementType(), S, 8295 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8296 NotEncodedT); 8297 S += ']'; 8298 } 8299 return; 8300 } 8301 8302 case Type::FunctionNoProto: 8303 case Type::FunctionProto: 8304 S += '?'; 8305 return; 8306 8307 case Type::Record: { 8308 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8309 S += RDecl->isUnion() ? '(' : '{'; 8310 // Anonymous structures print as '?' 8311 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8312 S += II->getName(); 8313 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8314 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8315 llvm::raw_string_ostream OS(S); 8316 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8317 getPrintingPolicy()); 8318 } 8319 } else { 8320 S += '?'; 8321 } 8322 if (Options.ExpandStructures()) { 8323 S += '='; 8324 if (!RDecl->isUnion()) { 8325 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8326 } else { 8327 for (const auto *Field : RDecl->fields()) { 8328 if (FD) { 8329 S += '"'; 8330 S += Field->getNameAsString(); 8331 S += '"'; 8332 } 8333 8334 // Special case bit-fields. 
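          // (e.g. a 3-bit field encodes as "b3" on the NeXT runtime; the GNU
          //  runtime also embeds the bit offset and the underlying type, as
          //  described in EncodeBitField above.)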
8335 if (Field->isBitField()) { 8336 getObjCEncodingForTypeImpl(Field->getType(), S, 8337 ObjCEncOptions().setExpandStructures(), 8338 Field); 8339 } else { 8340 QualType qt = Field->getType(); 8341 getLegacyIntegralTypeEncoding(qt); 8342 getObjCEncodingForTypeImpl( 8343 qt, S, 8344 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8345 NotEncodedT); 8346 } 8347 } 8348 } 8349 } 8350 S += RDecl->isUnion() ? ')' : '}'; 8351 return; 8352 } 8353 8354 case Type::BlockPointer: { 8355 const auto *BT = T->castAs<BlockPointerType>(); 8356 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8357 if (Options.EncodeBlockParameters()) { 8358 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8359 8360 S += '<'; 8361 // Block return type 8362 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8363 Options.forComponentType(), FD, NotEncodedT); 8364 // Block self 8365 S += "@?"; 8366 // Block parameters 8367 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8368 for (const auto &I : FPT->param_types()) 8369 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8370 NotEncodedT); 8371 } 8372 S += '>'; 8373 } 8374 return; 8375 } 8376 8377 case Type::ObjCObject: { 8378 // hack to match legacy encoding of *id and *Class 8379 QualType Ty = getObjCObjectPointerType(CT); 8380 if (Ty->isObjCIdType()) { 8381 S += "{objc_object=}"; 8382 return; 8383 } 8384 else if (Ty->isObjCClassType()) { 8385 S += "{objc_class=}"; 8386 return; 8387 } 8388 // TODO: Double check to make sure this intentionally falls through. 8389 [[fallthrough]]; 8390 } 8391 8392 case Type::ObjCInterface: { 8393 // Ignore protocol qualifiers when mangling at this level. 8394 // @encode(class_name) 8395 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8396 S += '{'; 8397 S += OI->getObjCRuntimeNameAsString(); 8398 if (Options.ExpandStructures()) { 8399 S += '='; 8400 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8401 DeepCollectObjCIvars(OI, true, Ivars); 8402 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8403 const FieldDecl *Field = Ivars[i]; 8404 if (Field->isBitField()) 8405 getObjCEncodingForTypeImpl(Field->getType(), S, 8406 ObjCEncOptions().setExpandStructures(), 8407 Field); 8408 else 8409 getObjCEncodingForTypeImpl(Field->getType(), S, 8410 ObjCEncOptions().setExpandStructures(), FD, 8411 NotEncodedT); 8412 } 8413 } 8414 S += '}'; 8415 return; 8416 } 8417 8418 case Type::ObjCObjectPointer: { 8419 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8420 if (OPT->isObjCIdType()) { 8421 S += '@'; 8422 return; 8423 } 8424 8425 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8426 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8427 // Since this is a binary compatibility issue, need to consult with 8428 // runtime folks. Fortunately, this is a *very* obscure construct. 8429 S += '#'; 8430 return; 8431 } 8432 8433 if (OPT->isObjCQualifiedIdType()) { 8434 getObjCEncodingForTypeImpl( 8435 getObjCIdType(), S, 8436 Options.keepingOnly(ObjCEncOptions() 8437 .setExpandPointedToStructures() 8438 .setExpandStructures()), 8439 FD); 8440 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8441 // Note that we do extended encoding of protocol qualifier list 8442 // Only when doing ivar or property encoding. 
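      // (e.g. an ivar of type 'id<NSCopying>' is encoded as '@"<NSCopying>"'.)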
8443 S += '"'; 8444 for (const auto *I : OPT->quals()) { 8445 S += '<'; 8446 S += I->getObjCRuntimeNameAsString(); 8447 S += '>'; 8448 } 8449 S += '"'; 8450 } 8451 return; 8452 } 8453 8454 S += '@'; 8455 if (OPT->getInterfaceDecl() && 8456 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8457 S += '"'; 8458 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8459 for (const auto *I : OPT->quals()) { 8460 S += '<'; 8461 S += I->getObjCRuntimeNameAsString(); 8462 S += '>'; 8463 } 8464 S += '"'; 8465 } 8466 return; 8467 } 8468 8469 // gcc just blithely ignores member pointers. 8470 // FIXME: we should do better than that. 'M' is available. 8471 case Type::MemberPointer: 8472 // This matches gcc's encoding, even though technically it is insufficient. 8473 //FIXME. We should do a better job than gcc. 8474 case Type::Vector: 8475 case Type::ExtVector: 8476 // Until we have a coherent encoding of these three types, issue warning. 8477 if (NotEncodedT) 8478 *NotEncodedT = T; 8479 return; 8480 8481 case Type::ConstantMatrix: 8482 if (NotEncodedT) 8483 *NotEncodedT = T; 8484 return; 8485 8486 case Type::BitInt: 8487 if (NotEncodedT) 8488 *NotEncodedT = T; 8489 return; 8490 8491 // We could see an undeduced auto type here during error recovery. 8492 // Just ignore it. 8493 case Type::Auto: 8494 case Type::DeducedTemplateSpecialization: 8495 return; 8496 8497 case Type::Pipe: 8498 #define ABSTRACT_TYPE(KIND, BASE) 8499 #define TYPE(KIND, BASE) 8500 #define DEPENDENT_TYPE(KIND, BASE) \ 8501 case Type::KIND: 8502 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8503 case Type::KIND: 8504 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8505 case Type::KIND: 8506 #include "clang/AST/TypeNodes.inc" 8507 llvm_unreachable("@encode for dependent type!"); 8508 } 8509 llvm_unreachable("bad type kind!"); 8510 } 8511 8512 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8513 std::string &S, 8514 const FieldDecl *FD, 8515 bool includeVBases, 8516 QualType *NotEncodedT) const { 8517 assert(RDecl && "Expected non-null RecordDecl"); 8518 assert(!RDecl->isUnion() && "Should not be called for unions"); 8519 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8520 return; 8521 8522 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8523 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8524 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8525 8526 if (CXXRec) { 8527 for (const auto &BI : CXXRec->bases()) { 8528 if (!BI.isVirtual()) { 8529 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8530 if (base->isEmpty()) 8531 continue; 8532 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8533 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8534 std::make_pair(offs, base)); 8535 } 8536 } 8537 } 8538 8539 for (FieldDecl *Field : RDecl->fields()) { 8540 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8541 continue; 8542 uint64_t offs = layout.getFieldOffset(Field->getFieldIndex()); 8543 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8544 std::make_pair(offs, Field)); 8545 } 8546 8547 if (CXXRec && includeVBases) { 8548 for (const auto &BI : CXXRec->vbases()) { 8549 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8550 if (base->isEmpty()) 8551 continue; 8552 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8553 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8554 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8555 
        FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
                                  std::make_pair(offs, base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(size);
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different than normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer in that case, though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
8620 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8621 NotEncodedT); 8622 assert(!base->isEmpty()); 8623 #ifndef NDEBUG 8624 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8625 #endif 8626 } else { 8627 const auto *field = cast<FieldDecl>(dcl); 8628 if (FD) { 8629 S += '"'; 8630 S += field->getNameAsString(); 8631 S += '"'; 8632 } 8633 8634 if (field->isBitField()) { 8635 EncodeBitField(this, S, field->getType(), field); 8636 #ifndef NDEBUG 8637 CurOffs += field->getBitWidthValue(*this); 8638 #endif 8639 } else { 8640 QualType qt = field->getType(); 8641 getLegacyIntegralTypeEncoding(qt); 8642 getObjCEncodingForTypeImpl( 8643 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8644 FD, NotEncodedT); 8645 #ifndef NDEBUG 8646 CurOffs += getTypeSize(field->getType()); 8647 #endif 8648 } 8649 } 8650 } 8651 } 8652 8653 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8654 std::string& S) const { 8655 if (QT & Decl::OBJC_TQ_In) 8656 S += 'n'; 8657 if (QT & Decl::OBJC_TQ_Inout) 8658 S += 'N'; 8659 if (QT & Decl::OBJC_TQ_Out) 8660 S += 'o'; 8661 if (QT & Decl::OBJC_TQ_Bycopy) 8662 S += 'O'; 8663 if (QT & Decl::OBJC_TQ_Byref) 8664 S += 'R'; 8665 if (QT & Decl::OBJC_TQ_Oneway) 8666 S += 'V'; 8667 } 8668 8669 TypedefDecl *ASTContext::getObjCIdDecl() const { 8670 if (!ObjCIdDecl) { 8671 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8672 T = getObjCObjectPointerType(T); 8673 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8674 } 8675 return ObjCIdDecl; 8676 } 8677 8678 TypedefDecl *ASTContext::getObjCSelDecl() const { 8679 if (!ObjCSelDecl) { 8680 QualType T = getPointerType(ObjCBuiltinSelTy); 8681 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8682 } 8683 return ObjCSelDecl; 8684 } 8685 8686 TypedefDecl *ASTContext::getObjCClassDecl() const { 8687 if (!ObjCClassDecl) { 8688 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8689 T = getObjCObjectPointerType(T); 8690 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8691 } 8692 return ObjCClassDecl; 8693 } 8694 8695 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8696 if (!ObjCProtocolClassDecl) { 8697 ObjCProtocolClassDecl 8698 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8699 SourceLocation(), 8700 &Idents.get("Protocol"), 8701 /*typeParamList=*/nullptr, 8702 /*PrevDecl=*/nullptr, 8703 SourceLocation(), true); 8704 } 8705 8706 return ObjCProtocolClassDecl; 8707 } 8708 8709 //===----------------------------------------------------------------------===// 8710 // __builtin_va_list Construction Functions 8711 //===----------------------------------------------------------------------===// 8712 8713 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8714 StringRef Name) { 8715 // typedef char* __builtin[_ms]_va_list; 8716 QualType T = Context->getPointerType(Context->CharTy); 8717 return Context->buildImplicitTypedef(T, Name); 8718 } 8719 8720 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8721 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8722 } 8723 8724 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8725 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8726 } 8727 8728 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8729 // typedef void* __builtin_va_list; 8730 QualType T = Context->getPointerType(Context->VoidTy); 8731 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8732 } 8733 8734 static TypedefDecl * 8735 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8736 // struct __va_list 8737 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8738 if (Context->getLangOpts().CPlusPlus) { 8739 // namespace std { struct __va_list { 8740 auto *NS = NamespaceDecl::Create( 8741 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8742 /*Inline=*/false, SourceLocation(), SourceLocation(), 8743 &Context->Idents.get("std"), 8744 /*PrevDecl=*/nullptr, /*Nested=*/false); 8745 NS->setImplicit(); 8746 VaListTagDecl->setDeclContext(NS); 8747 } 8748 8749 VaListTagDecl->startDefinition(); 8750 8751 const size_t NumFields = 5; 8752 QualType FieldTypes[NumFields]; 8753 const char *FieldNames[NumFields]; 8754 8755 // void *__stack; 8756 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8757 FieldNames[0] = "__stack"; 8758 8759 // void *__gr_top; 8760 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8761 FieldNames[1] = "__gr_top"; 8762 8763 // void *__vr_top; 8764 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8765 FieldNames[2] = "__vr_top"; 8766 8767 // int __gr_offs; 8768 FieldTypes[3] = Context->IntTy; 8769 FieldNames[3] = "__gr_offs"; 8770 8771 // int __vr_offs; 8772 FieldTypes[4] = Context->IntTy; 8773 FieldNames[4] = "__vr_offs"; 8774 8775 // Create fields 8776 for (unsigned i = 0; i < NumFields; ++i) { 8777 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8778 VaListTagDecl, 8779 SourceLocation(), 8780 SourceLocation(), 8781 &Context->Idents.get(FieldNames[i]), 8782 FieldTypes[i], /*TInfo=*/nullptr, 8783 /*BitWidth=*/nullptr, 8784 /*Mutable=*/false, 8785 ICIS_NoInit); 8786 Field->setAccess(AS_public); 8787 VaListTagDecl->addDecl(Field); 8788 } 8789 VaListTagDecl->completeDefinition(); 8790 Context->VaListTagDecl = VaListTagDecl; 8791 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8792 8793 // } __builtin_va_list; 8794 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8795 } 8796 8797 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8798 // typedef struct __va_list_tag { 8799 RecordDecl *VaListTagDecl; 8800 8801 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8802 VaListTagDecl->startDefinition(); 8803 8804 const size_t NumFields = 5; 8805 QualType FieldTypes[NumFields]; 8806 const char *FieldNames[NumFields]; 8807 8808 // unsigned char gpr; 8809 FieldTypes[0] = Context->UnsignedCharTy; 8810 FieldNames[0] = "gpr"; 8811 8812 // unsigned char fpr; 8813 FieldTypes[1] = Context->UnsignedCharTy; 8814 FieldNames[1] = "fpr"; 8815 8816 // unsigned short reserved; 8817 FieldTypes[2] = Context->UnsignedShortTy; 8818 FieldNames[2] = "reserved"; 8819 8820 // void* overflow_arg_area; 8821 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8822 FieldNames[3] = "overflow_arg_area"; 8823 8824 // void* reg_save_area; 8825 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8826 FieldNames[4] = "reg_save_area"; 8827 8828 // Create fields 8829 for (unsigned i = 0; i < NumFields; ++i) { 8830 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8831 SourceLocation(), 8832 SourceLocation(), 8833 &Context->Idents.get(FieldNames[i]), 8834 FieldTypes[i], /*TInfo=*/nullptr, 8835 /*BitWidth=*/nullptr, 8836 /*Mutable=*/false, 8837 ICIS_NoInit); 8838 Field->setAccess(AS_public); 8839 VaListTagDecl->addDecl(Field); 8840 } 8841 VaListTagDecl->completeDefinition(); 8842 
Context->VaListTagDecl = VaListTagDecl; 8843 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8844 8845 // } __va_list_tag; 8846 TypedefDecl *VaListTagTypedefDecl = 8847 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8848 8849 QualType VaListTagTypedefType = 8850 Context->getTypedefType(VaListTagTypedefDecl); 8851 8852 // typedef __va_list_tag __builtin_va_list[1]; 8853 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8854 QualType VaListTagArrayType = Context->getConstantArrayType( 8855 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 8856 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8857 } 8858 8859 static TypedefDecl * 8860 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8861 // struct __va_list_tag { 8862 RecordDecl *VaListTagDecl; 8863 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8864 VaListTagDecl->startDefinition(); 8865 8866 const size_t NumFields = 4; 8867 QualType FieldTypes[NumFields]; 8868 const char *FieldNames[NumFields]; 8869 8870 // unsigned gp_offset; 8871 FieldTypes[0] = Context->UnsignedIntTy; 8872 FieldNames[0] = "gp_offset"; 8873 8874 // unsigned fp_offset; 8875 FieldTypes[1] = Context->UnsignedIntTy; 8876 FieldNames[1] = "fp_offset"; 8877 8878 // void* overflow_arg_area; 8879 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8880 FieldNames[2] = "overflow_arg_area"; 8881 8882 // void* reg_save_area; 8883 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8884 FieldNames[3] = "reg_save_area"; 8885 8886 // Create fields 8887 for (unsigned i = 0; i < NumFields; ++i) { 8888 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8889 VaListTagDecl, 8890 SourceLocation(), 8891 SourceLocation(), 8892 &Context->Idents.get(FieldNames[i]), 8893 FieldTypes[i], /*TInfo=*/nullptr, 8894 /*BitWidth=*/nullptr, 8895 /*Mutable=*/false, 8896 ICIS_NoInit); 8897 Field->setAccess(AS_public); 8898 VaListTagDecl->addDecl(Field); 8899 } 8900 VaListTagDecl->completeDefinition(); 8901 Context->VaListTagDecl = VaListTagDecl; 8902 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8903 8904 // }; 8905 8906 // typedef struct __va_list_tag __builtin_va_list[1]; 8907 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8908 QualType VaListTagArrayType = Context->getConstantArrayType( 8909 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 8910 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8911 } 8912 8913 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8914 // typedef int __builtin_va_list[4]; 8915 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8916 QualType IntArrayType = Context->getConstantArrayType( 8917 Context->IntTy, Size, nullptr, ArraySizeModifier::Normal, 0); 8918 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8919 } 8920 8921 static TypedefDecl * 8922 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8923 // struct __va_list 8924 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8925 if (Context->getLangOpts().CPlusPlus) { 8926 // namespace std { struct __va_list { 8927 NamespaceDecl *NS; 8928 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8929 Context->getTranslationUnitDecl(), 8930 /*Inline=*/false, SourceLocation(), 8931 SourceLocation(), &Context->Idents.get("std"), 8932 /*PrevDecl=*/nullptr, /*Nested=*/false); 8933 
NS->setImplicit(); 8934 VaListDecl->setDeclContext(NS); 8935 } 8936 8937 VaListDecl->startDefinition(); 8938 8939 // void * __ap; 8940 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8941 VaListDecl, 8942 SourceLocation(), 8943 SourceLocation(), 8944 &Context->Idents.get("__ap"), 8945 Context->getPointerType(Context->VoidTy), 8946 /*TInfo=*/nullptr, 8947 /*BitWidth=*/nullptr, 8948 /*Mutable=*/false, 8949 ICIS_NoInit); 8950 Field->setAccess(AS_public); 8951 VaListDecl->addDecl(Field); 8952 8953 // }; 8954 VaListDecl->completeDefinition(); 8955 Context->VaListTagDecl = VaListDecl; 8956 8957 // typedef struct __va_list __builtin_va_list; 8958 QualType T = Context->getRecordType(VaListDecl); 8959 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8960 } 8961 8962 static TypedefDecl * 8963 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8964 // struct __va_list_tag { 8965 RecordDecl *VaListTagDecl; 8966 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8967 VaListTagDecl->startDefinition(); 8968 8969 const size_t NumFields = 4; 8970 QualType FieldTypes[NumFields]; 8971 const char *FieldNames[NumFields]; 8972 8973 // long __gpr; 8974 FieldTypes[0] = Context->LongTy; 8975 FieldNames[0] = "__gpr"; 8976 8977 // long __fpr; 8978 FieldTypes[1] = Context->LongTy; 8979 FieldNames[1] = "__fpr"; 8980 8981 // void *__overflow_arg_area; 8982 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8983 FieldNames[2] = "__overflow_arg_area"; 8984 8985 // void *__reg_save_area; 8986 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8987 FieldNames[3] = "__reg_save_area"; 8988 8989 // Create fields 8990 for (unsigned i = 0; i < NumFields; ++i) { 8991 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8992 VaListTagDecl, 8993 SourceLocation(), 8994 SourceLocation(), 8995 &Context->Idents.get(FieldNames[i]), 8996 FieldTypes[i], /*TInfo=*/nullptr, 8997 /*BitWidth=*/nullptr, 8998 /*Mutable=*/false, 8999 ICIS_NoInit); 9000 Field->setAccess(AS_public); 9001 VaListTagDecl->addDecl(Field); 9002 } 9003 VaListTagDecl->completeDefinition(); 9004 Context->VaListTagDecl = VaListTagDecl; 9005 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9006 9007 // }; 9008 9009 // typedef __va_list_tag __builtin_va_list[1]; 9010 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9011 QualType VaListTagArrayType = Context->getConstantArrayType( 9012 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 9013 9014 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9015 } 9016 9017 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 9018 // typedef struct __va_list_tag { 9019 RecordDecl *VaListTagDecl; 9020 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9021 VaListTagDecl->startDefinition(); 9022 9023 const size_t NumFields = 3; 9024 QualType FieldTypes[NumFields]; 9025 const char *FieldNames[NumFields]; 9026 9027 // void *CurrentSavedRegisterArea; 9028 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 9029 FieldNames[0] = "__current_saved_reg_area_pointer"; 9030 9031 // void *SavedRegAreaEnd; 9032 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 9033 FieldNames[1] = "__saved_reg_area_end_pointer"; 9034 9035 // void *OverflowArea; 9036 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9037 FieldNames[2] = "__overflow_area_pointer"; 9038 9039 // Create fields 9040 for (unsigned i = 0; i < NumFields; ++i) { 9041 
FieldDecl *Field = FieldDecl::Create( 9042 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 9043 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 9044 /*TInfo=*/nullptr, 9045 /*BitWidth=*/nullptr, 9046 /*Mutable=*/false, ICIS_NoInit); 9047 Field->setAccess(AS_public); 9048 VaListTagDecl->addDecl(Field); 9049 } 9050 VaListTagDecl->completeDefinition(); 9051 Context->VaListTagDecl = VaListTagDecl; 9052 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9053 9054 // } __va_list_tag; 9055 TypedefDecl *VaListTagTypedefDecl = 9056 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 9057 9058 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 9059 9060 // typedef __va_list_tag __builtin_va_list[1]; 9061 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9062 QualType VaListTagArrayType = Context->getConstantArrayType( 9063 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 9064 9065 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9066 } 9067 9068 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 9069 TargetInfo::BuiltinVaListKind Kind) { 9070 switch (Kind) { 9071 case TargetInfo::CharPtrBuiltinVaList: 9072 return CreateCharPtrBuiltinVaListDecl(Context); 9073 case TargetInfo::VoidPtrBuiltinVaList: 9074 return CreateVoidPtrBuiltinVaListDecl(Context); 9075 case TargetInfo::AArch64ABIBuiltinVaList: 9076 return CreateAArch64ABIBuiltinVaListDecl(Context); 9077 case TargetInfo::PowerABIBuiltinVaList: 9078 return CreatePowerABIBuiltinVaListDecl(Context); 9079 case TargetInfo::X86_64ABIBuiltinVaList: 9080 return CreateX86_64ABIBuiltinVaListDecl(Context); 9081 case TargetInfo::PNaClABIBuiltinVaList: 9082 return CreatePNaClABIBuiltinVaListDecl(Context); 9083 case TargetInfo::AAPCSABIBuiltinVaList: 9084 return CreateAAPCSABIBuiltinVaListDecl(Context); 9085 case TargetInfo::SystemZBuiltinVaList: 9086 return CreateSystemZBuiltinVaListDecl(Context); 9087 case TargetInfo::HexagonBuiltinVaList: 9088 return CreateHexagonBuiltinVaListDecl(Context); 9089 } 9090 9091 llvm_unreachable("Unhandled __builtin_va_list type kind"); 9092 } 9093 9094 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 9095 if (!BuiltinVaListDecl) { 9096 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 9097 assert(BuiltinVaListDecl->isImplicit()); 9098 } 9099 9100 return BuiltinVaListDecl; 9101 } 9102 9103 Decl *ASTContext::getVaListTagDecl() const { 9104 // Force the creation of VaListTagDecl by building the __builtin_va_list 9105 // declaration. 9106 if (!VaListTagDecl) 9107 (void)getBuiltinVaListDecl(); 9108 9109 return VaListTagDecl; 9110 } 9111 9112 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 9113 if (!BuiltinMSVaListDecl) 9114 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 9115 9116 return BuiltinMSVaListDecl; 9117 } 9118 9119 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 9120 // Allow redecl custom type checking builtin for HLSL. 
9121 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin && 9122 BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID())) 9123 return true; 9124 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 9125 } 9126 9127 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 9128 assert(ObjCConstantStringType.isNull() && 9129 "'NSConstantString' type already set!"); 9130 9131 ObjCConstantStringType = getObjCInterfaceType(Decl); 9132 } 9133 9134 /// Retrieve the template name that corresponds to a non-empty 9135 /// lookup. 9136 TemplateName 9137 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 9138 UnresolvedSetIterator End) const { 9139 unsigned size = End - Begin; 9140 assert(size > 1 && "set is not overloaded!"); 9141 9142 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 9143 size * sizeof(FunctionTemplateDecl*)); 9144 auto *OT = new (memory) OverloadedTemplateStorage(size); 9145 9146 NamedDecl **Storage = OT->getStorage(); 9147 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 9148 NamedDecl *D = *I; 9149 assert(isa<FunctionTemplateDecl>(D) || 9150 isa<UnresolvedUsingValueDecl>(D) || 9151 (isa<UsingShadowDecl>(D) && 9152 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 9153 *Storage++ = D; 9154 } 9155 9156 return TemplateName(OT); 9157 } 9158 9159 /// Retrieve a template name representing an unqualified-id that has been 9160 /// assumed to name a template for ADL purposes. 9161 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 9162 auto *OT = new (*this) AssumedTemplateStorage(Name); 9163 return TemplateName(OT); 9164 } 9165 9166 /// Retrieve the template name that represents a qualified 9167 /// template name such as \c std::vector. 9168 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 9169 bool TemplateKeyword, 9170 TemplateName Template) const { 9171 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9172 9173 // FIXME: Canonicalization? 9174 llvm::FoldingSetNodeID ID; 9175 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9176 9177 void *InsertPos = nullptr; 9178 QualifiedTemplateName *QTN = 9179 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9180 if (!QTN) { 9181 QTN = new (*this, alignof(QualifiedTemplateName)) 9182 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9183 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9184 } 9185 9186 return TemplateName(QTN); 9187 } 9188 9189 /// Retrieve the template name that represents a dependent 9190 /// template name such as \c MetaFun::template apply. 
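/// (For instance, within 'template <typename MetaFun> struct X', the name used
/// in 'typename MetaFun::template apply<int>::type' is such a dependent
/// template name; illustrative example only.)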
9191 TemplateName 9192 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9193 const IdentifierInfo *Name) const { 9194 assert((!NNS || NNS->isDependent()) && 9195 "Nested name specifier must be dependent"); 9196 9197 llvm::FoldingSetNodeID ID; 9198 DependentTemplateName::Profile(ID, NNS, Name); 9199 9200 void *InsertPos = nullptr; 9201 DependentTemplateName *QTN = 9202 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9203 9204 if (QTN) 9205 return TemplateName(QTN); 9206 9207 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9208 if (CanonNNS == NNS) { 9209 QTN = new (*this, alignof(DependentTemplateName)) 9210 DependentTemplateName(NNS, Name); 9211 } else { 9212 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9213 QTN = new (*this, alignof(DependentTemplateName)) 9214 DependentTemplateName(NNS, Name, Canon); 9215 DependentTemplateName *CheckQTN = 9216 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9217 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9218 (void)CheckQTN; 9219 } 9220 9221 DependentTemplateNames.InsertNode(QTN, InsertPos); 9222 return TemplateName(QTN); 9223 } 9224 9225 /// Retrieve the template name that represents a dependent 9226 /// template name such as \c MetaFun::template operator+. 9227 TemplateName 9228 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9229 OverloadedOperatorKind Operator) const { 9230 assert((!NNS || NNS->isDependent()) && 9231 "Nested name specifier must be dependent"); 9232 9233 llvm::FoldingSetNodeID ID; 9234 DependentTemplateName::Profile(ID, NNS, Operator); 9235 9236 void *InsertPos = nullptr; 9237 DependentTemplateName *QTN 9238 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9239 9240 if (QTN) 9241 return TemplateName(QTN); 9242 9243 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9244 if (CanonNNS == NNS) { 9245 QTN = new (*this, alignof(DependentTemplateName)) 9246 DependentTemplateName(NNS, Operator); 9247 } else { 9248 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9249 QTN = new (*this, alignof(DependentTemplateName)) 9250 DependentTemplateName(NNS, Operator, Canon); 9251 9252 DependentTemplateName *CheckQTN 9253 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9254 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9255 (void)CheckQTN; 9256 } 9257 9258 DependentTemplateNames.InsertNode(QTN, InsertPos); 9259 return TemplateName(QTN); 9260 } 9261 9262 TemplateName ASTContext::getSubstTemplateTemplateParm( 9263 TemplateName Replacement, Decl *AssociatedDecl, unsigned Index, 9264 std::optional<unsigned> PackIndex) const { 9265 llvm::FoldingSetNodeID ID; 9266 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl, 9267 Index, PackIndex); 9268 9269 void *insertPos = nullptr; 9270 SubstTemplateTemplateParmStorage *subst 9271 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9272 9273 if (!subst) { 9274 subst = new (*this) SubstTemplateTemplateParmStorage( 9275 Replacement, AssociatedDecl, Index, PackIndex); 9276 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9277 } 9278 9279 return TemplateName(subst); 9280 } 9281 9282 TemplateName 9283 ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack, 9284 Decl *AssociatedDecl, 9285 unsigned Index, bool Final) const { 9286 auto &Self = const_cast<ASTContext &>(*this); 9287 llvm::FoldingSetNodeID ID; 9288 
SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack,
9289 AssociatedDecl, Index, Final);
9290
9291 void *InsertPos = nullptr;
9292 SubstTemplateTemplateParmPackStorage *Subst
9293 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
9294
9295 if (!Subst) {
9296 Subst = new (*this) SubstTemplateTemplateParmPackStorage(
9297 ArgPack.pack_elements(), AssociatedDecl, Index, Final);
9298 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
9299 }
9300
9301 return TemplateName(Subst);
9302 }
9303
9304 /// getFromTargetType - Given one of the integer types provided by
9305 /// TargetInfo, produce the corresponding type. The unsigned @p Type
9306 /// is actually a value of type @c TargetInfo::IntType.
9307 CanQualType ASTContext::getFromTargetType(unsigned Type) const {
9308 switch (Type) {
9309 case TargetInfo::NoInt: return {};
9310 case TargetInfo::SignedChar: return SignedCharTy;
9311 case TargetInfo::UnsignedChar: return UnsignedCharTy;
9312 case TargetInfo::SignedShort: return ShortTy;
9313 case TargetInfo::UnsignedShort: return UnsignedShortTy;
9314 case TargetInfo::SignedInt: return IntTy;
9315 case TargetInfo::UnsignedInt: return UnsignedIntTy;
9316 case TargetInfo::SignedLong: return LongTy;
9317 case TargetInfo::UnsignedLong: return UnsignedLongTy;
9318 case TargetInfo::SignedLongLong: return LongLongTy;
9319 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
9320 }
9321
9322 llvm_unreachable("Unhandled TargetInfo::IntType value");
9323 }
9324
9325 //===----------------------------------------------------------------------===//
9326 // Type Predicates.
9327 //===----------------------------------------------------------------------===//
9328
9329 /// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, Objective-C's
9330 /// garbage collection attributes.
9331 ///
9332 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
9333 if (getLangOpts().getGC() == LangOptions::NonGC)
9334 return Qualifiers::GCNone;
9335
9336 assert(getLangOpts().ObjC);
9337 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
9338
9339 // Default behaviour under Objective-C's GC is for ObjC pointers
9340 // (or pointers to them) to be treated as though they were declared
9341 // as __strong.
9342 if (GCAttrs == Qualifiers::GCNone) {
9343 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
9344 return Qualifiers::Strong;
9345 else if (Ty->isPointerType())
9346 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType());
9347 } else {
9348 // It's not valid to set GC attributes on anything that isn't a
9349 // pointer.
9350 #ifndef NDEBUG
9351 QualType CT = Ty->getCanonicalTypeInternal();
9352 while (const auto *AT = dyn_cast<ArrayType>(CT))
9353 CT = AT->getElementType();
9354 assert(CT->isAnyPointerType() || CT->isBlockPointerType());
9355 #endif
9356 }
9357 return GCAttrs;
9358 }
9359
9360 //===----------------------------------------------------------------------===//
9361 // Type Compatibility Testing
9362 //===----------------------------------------------------------------------===//
9363
9364 /// areCompatVectorTypes - Return true if the two specified vector types are
9365 /// compatible.
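/// For example (illustrative, with made-up typedef names): two GCC-style
/// vectors declared as
/// \code
///   typedef int v4a __attribute__((vector_size(16)));
///   typedef int v4b __attribute__((vector_size(16)));
/// \endcode
/// are compatible here, since both canonicalize to a 4 x int vector, whereas
/// a 4 x float or an 8 x short vector of the same overall width would not be.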
9366 static bool areCompatVectorTypes(const VectorType *LHS, 9367 const VectorType *RHS) { 9368 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9369 return LHS->getElementType() == RHS->getElementType() && 9370 LHS->getNumElements() == RHS->getNumElements(); 9371 } 9372 9373 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9374 /// compatible. 9375 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9376 const ConstantMatrixType *RHS) { 9377 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9378 return LHS->getElementType() == RHS->getElementType() && 9379 LHS->getNumRows() == RHS->getNumRows() && 9380 LHS->getNumColumns() == RHS->getNumColumns(); 9381 } 9382 9383 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9384 QualType SecondVec) { 9385 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9386 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9387 9388 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9389 return true; 9390 9391 // Treat Neon vector types and most AltiVec vector types as if they are the 9392 // equivalent GCC vector types. 9393 const auto *First = FirstVec->castAs<VectorType>(); 9394 const auto *Second = SecondVec->castAs<VectorType>(); 9395 if (First->getNumElements() == Second->getNumElements() && 9396 hasSameType(First->getElementType(), Second->getElementType()) && 9397 First->getVectorKind() != VectorKind::AltiVecPixel && 9398 First->getVectorKind() != VectorKind::AltiVecBool && 9399 Second->getVectorKind() != VectorKind::AltiVecPixel && 9400 Second->getVectorKind() != VectorKind::AltiVecBool && 9401 First->getVectorKind() != VectorKind::SveFixedLengthData && 9402 First->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9403 Second->getVectorKind() != VectorKind::SveFixedLengthData && 9404 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9405 First->getVectorKind() != VectorKind::RVVFixedLengthData && 9406 Second->getVectorKind() != VectorKind::RVVFixedLengthData) 9407 return true; 9408 9409 return false; 9410 } 9411 9412 /// getSVETypeSize - Return SVE vector or predicate register size. 9413 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9414 assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type"); 9415 if (Ty->getKind() == BuiltinType::SveBool || 9416 Ty->getKind() == BuiltinType::SveCount) 9417 return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth(); 9418 return Context.getLangOpts().VScaleMin * 128; 9419 } 9420 9421 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9422 QualType SecondType) { 9423 assert( 9424 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9425 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9426 "Expected SVE builtin type and vector type!"); 9427 9428 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9429 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9430 if (const auto *VT = SecondType->getAs<VectorType>()) { 9431 // Predicates have the same representation as uint8 so we also have to 9432 // check the kind to make these types incompatible. 
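// (Illustrative note: a fixed-length predicate vector is therefore only
// treated as compatible with svbool_t below, even though its storage looks
// like a fixed-length vector of 8-bit integers.)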
9433 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 9434 return BT->getKind() == BuiltinType::SveBool; 9435 else if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 9436 return VT->getElementType().getCanonicalType() == 9437 FirstType->getSveEltType(*this); 9438 else if (VT->getVectorKind() == VectorKind::Generic) 9439 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9440 hasSameType(VT->getElementType(), 9441 getBuiltinVectorTypeInfo(BT).ElementType); 9442 } 9443 } 9444 return false; 9445 }; 9446 9447 return IsValidCast(FirstType, SecondType) || 9448 IsValidCast(SecondType, FirstType); 9449 } 9450 9451 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9452 QualType SecondType) { 9453 assert( 9454 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9455 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9456 "Expected SVE builtin type and vector type!"); 9457 9458 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9459 const auto *BT = FirstType->getAs<BuiltinType>(); 9460 if (!BT) 9461 return false; 9462 9463 const auto *VecTy = SecondType->getAs<VectorType>(); 9464 if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData || 9465 VecTy->getVectorKind() == VectorKind::Generic)) { 9466 const LangOptions::LaxVectorConversionKind LVCKind = 9467 getLangOpts().getLaxVectorConversions(); 9468 9469 // Can not convert between sve predicates and sve vectors because of 9470 // different size. 9471 if (BT->getKind() == BuiltinType::SveBool && 9472 VecTy->getVectorKind() == VectorKind::SveFixedLengthData) 9473 return false; 9474 9475 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9476 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9477 // converts to VLAT and VLAT implicitly converts to GNUT." 9478 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9479 // predicates. 9480 if (VecTy->getVectorKind() == VectorKind::Generic && 9481 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9482 return false; 9483 9484 // If -flax-vector-conversions=all is specified, the types are 9485 // certainly compatible. 9486 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9487 return true; 9488 9489 // If -flax-vector-conversions=integer is specified, the types are 9490 // compatible if the elements are integer types. 9491 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9492 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9493 FirstType->getSveEltType(*this)->isIntegerType(); 9494 } 9495 9496 return false; 9497 }; 9498 9499 return IsLaxCompatible(FirstType, SecondType) || 9500 IsLaxCompatible(SecondType, FirstType); 9501 } 9502 9503 /// getRVVTypeSize - Return RVV vector register size. 
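/// The size is vscale_min * MinElts * EltSize, in bits. As an illustrative
/// example, with a minimum vscale of 2 and a builtin type whose minimum
/// element count is 8 and whose elements are 32 bits wide, the result is
/// 2 * 8 * 32 = 512 bits; when no vscale range is known the result is 0.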
9504 static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) { 9505 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type"); 9506 auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts()); 9507 if (!VScale) 9508 return 0; 9509 9510 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty); 9511 9512 uint64_t EltSize = Context.getTypeSize(Info.ElementType); 9513 uint64_t MinElts = Info.EC.getKnownMinValue(); 9514 return VScale->first * MinElts * EltSize; 9515 } 9516 9517 bool ASTContext::areCompatibleRVVTypes(QualType FirstType, 9518 QualType SecondType) { 9519 assert( 9520 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9521 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9522 "Expected RVV builtin type and vector type!"); 9523 9524 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9525 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9526 if (const auto *VT = SecondType->getAs<VectorType>()) { 9527 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData || 9528 VT->getVectorKind() == VectorKind::Generic) 9529 return FirstType->isRVVVLSBuiltinType() && 9530 getTypeSize(SecondType) == getRVVTypeSize(*this, BT) && 9531 hasSameType(VT->getElementType(), 9532 getBuiltinVectorTypeInfo(BT).ElementType); 9533 } 9534 } 9535 return false; 9536 }; 9537 9538 return IsValidCast(FirstType, SecondType) || 9539 IsValidCast(SecondType, FirstType); 9540 } 9541 9542 bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType, 9543 QualType SecondType) { 9544 assert( 9545 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9546 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9547 "Expected RVV builtin type and vector type!"); 9548 9549 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9550 const auto *BT = FirstType->getAs<BuiltinType>(); 9551 if (!BT) 9552 return false; 9553 9554 if (!BT->isRVVVLSBuiltinType()) 9555 return false; 9556 9557 const auto *VecTy = SecondType->getAs<VectorType>(); 9558 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) { 9559 const LangOptions::LaxVectorConversionKind LVCKind = 9560 getLangOpts().getLaxVectorConversions(); 9561 9562 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion. 9563 if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT)) 9564 return false; 9565 9566 // If -flax-vector-conversions=all is specified, the types are 9567 // certainly compatible. 9568 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9569 return true; 9570 9571 // If -flax-vector-conversions=integer is specified, the types are 9572 // compatible if the elements are integer types. 9573 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9574 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9575 FirstType->getRVVEltType(*this)->isIntegerType(); 9576 } 9577 9578 return false; 9579 }; 9580 9581 return IsLaxCompatible(FirstType, SecondType) || 9582 IsLaxCompatible(SecondType, FirstType); 9583 } 9584 9585 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9586 while (true) { 9587 // __strong id 9588 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9589 if (Attr->getAttrKind() == attr::ObjCOwnership) 9590 return true; 9591 9592 Ty = Attr->getModifiedType(); 9593 9594 // X *__strong (...) 
9595 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9596 Ty = Paren->getInnerType(); 9597 9598 // We do not want to look through typedefs, typeof(expr), 9599 // typeof(type), or any other way that the type is somehow 9600 // abstracted. 9601 } else { 9602 return false; 9603 } 9604 } 9605 } 9606 9607 //===----------------------------------------------------------------------===// 9608 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9609 //===----------------------------------------------------------------------===// 9610 9611 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9612 /// inheritance hierarchy of 'rProto'. 9613 bool 9614 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9615 ObjCProtocolDecl *rProto) const { 9616 if (declaresSameEntity(lProto, rProto)) 9617 return true; 9618 for (auto *PI : rProto->protocols()) 9619 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9620 return true; 9621 return false; 9622 } 9623 9624 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9625 /// Class<pr1, ...>. 9626 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9627 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9628 for (auto *lhsProto : lhs->quals()) { 9629 bool match = false; 9630 for (auto *rhsProto : rhs->quals()) { 9631 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9632 match = true; 9633 break; 9634 } 9635 } 9636 if (!match) 9637 return false; 9638 } 9639 return true; 9640 } 9641 9642 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9643 /// ObjCQualifiedIDType. 9644 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9645 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9646 bool compare) { 9647 // Allow id<P..> and an 'id' in all cases. 9648 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9649 return true; 9650 9651 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9652 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9653 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9654 return false; 9655 9656 if (lhs->isObjCQualifiedIdType()) { 9657 if (rhs->qual_empty()) { 9658 // If the RHS is a unqualified interface pointer "NSString*", 9659 // make sure we check the class hierarchy. 9660 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9661 for (auto *I : lhs->quals()) { 9662 // when comparing an id<P> on lhs with a static type on rhs, 9663 // see if static class implements all of id's protocols, directly or 9664 // through its super class and categories. 9665 if (!rhsID->ClassImplementsProtocol(I, true)) 9666 return false; 9667 } 9668 } 9669 // If there are no qualifiers and no interface, we have an 'id'. 9670 return true; 9671 } 9672 // Both the right and left sides have qualifiers. 9673 for (auto *lhsProto : lhs->quals()) { 9674 bool match = false; 9675 9676 // when comparing an id<P> on lhs with a static type on rhs, 9677 // see if static class implements all of id's protocols, directly or 9678 // through its super class and categories. 9679 for (auto *rhsProto : rhs->quals()) { 9680 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9681 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9682 match = true; 9683 break; 9684 } 9685 } 9686 // If the RHS is a qualified interface pointer "NSString<P>*", 9687 // make sure we check the class hierarchy. 
9688 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9689 for (auto *I : lhs->quals()) { 9690 // when comparing an id<P> on lhs with a static type on rhs, 9691 // see if static class implements all of id's protocols, directly or 9692 // through its super class and categories. 9693 if (rhsID->ClassImplementsProtocol(I, true)) { 9694 match = true; 9695 break; 9696 } 9697 } 9698 } 9699 if (!match) 9700 return false; 9701 } 9702 9703 return true; 9704 } 9705 9706 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9707 9708 if (lhs->getInterfaceType()) { 9709 // If both the right and left sides have qualifiers. 9710 for (auto *lhsProto : lhs->quals()) { 9711 bool match = false; 9712 9713 // when comparing an id<P> on rhs with a static type on lhs, 9714 // see if static class implements all of id's protocols, directly or 9715 // through its super class and categories. 9716 // First, lhs protocols in the qualifier list must be found, direct 9717 // or indirect in rhs's qualifier list or it is a mismatch. 9718 for (auto *rhsProto : rhs->quals()) { 9719 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9720 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9721 match = true; 9722 break; 9723 } 9724 } 9725 if (!match) 9726 return false; 9727 } 9728 9729 // Static class's protocols, or its super class or category protocols 9730 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9731 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9732 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9733 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9734 // This is rather dubious but matches gcc's behavior. If lhs has 9735 // no type qualifier and its class has no static protocol(s) 9736 // assume that it is mismatch. 9737 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9738 return false; 9739 for (auto *lhsProto : LHSInheritedProtocols) { 9740 bool match = false; 9741 for (auto *rhsProto : rhs->quals()) { 9742 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9743 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9744 match = true; 9745 break; 9746 } 9747 } 9748 if (!match) 9749 return false; 9750 } 9751 } 9752 return true; 9753 } 9754 return false; 9755 } 9756 9757 /// canAssignObjCInterfaces - Return true if the two interface types are 9758 /// compatible for assignment from RHS to LHS. This handles validation of any 9759 /// protocol qualifiers on the LHS or RHS. 9760 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9761 const ObjCObjectPointerType *RHSOPT) { 9762 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9763 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9764 9765 // If either type represents the built-in 'id' type, return true. 9766 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9767 return true; 9768 9769 // Function object that propagates a successful result or handles 9770 // __kindof types. 9771 auto finish = [&](bool succeeded) -> bool { 9772 if (succeeded) 9773 return true; 9774 9775 if (!RHS->isKindOfType()) 9776 return false; 9777 9778 // Strip off __kindof and protocol qualifiers, then check whether 9779 // we can assign the other way. 9780 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9781 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9782 }; 9783 9784 // Casts from or to id<P> are allowed when the other side has compatible 9785 // protocols. 
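// For example (illustrative): assigning an NSString * to a variable of type
// id<NSCopying> is accepted as long as NSString, one of its superclasses, or
// one of its categories adopts NSCopying.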
9786 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
9787 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false));
9788 }
9789
9790 // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
9791 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
9792 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT));
9793 }
9794
9795 // Casts from Class to Class<Foo>, or vice-versa, are allowed.
9796 if (LHS->isObjCClass() && RHS->isObjCClass()) {
9797 return true;
9798 }
9799
9800 // If we have 2 user-defined types, fall into that path.
9801 if (LHS->getInterface() && RHS->getInterface()) {
9802 return finish(canAssignObjCInterfaces(LHS, RHS));
9803 }
9804
9805 return false;
9806 }
9807
9808 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
9809 /// to provide type safety for Objective-C pointers used to pass or return
9810 /// arguments in block literals. When passed as arguments, passing 'A *' where
9811 /// 'id' is expected is not OK. Passing 'Sub *' where 'Super *' is expected is
9812 /// not OK. For the return type, the opposite is not OK.
9813 bool ASTContext::canAssignObjCInterfacesInBlockPointer(
9814 const ObjCObjectPointerType *LHSOPT,
9815 const ObjCObjectPointerType *RHSOPT,
9816 bool BlockReturnType) {
9817
9818 // Function object that propagates a successful result or handles
9819 // __kindof types.
9820 auto finish = [&](bool succeeded) -> bool {
9821 if (succeeded)
9822 return true;
9823
9824 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
9825 if (!Expected->isKindOfType())
9826 return false;
9827
9828 // Strip off __kindof and protocol qualifiers, then check whether
9829 // we can assign the other way.
9830 return canAssignObjCInterfacesInBlockPointer(
9831 RHSOPT->stripObjCKindOfTypeAndQuals(*this),
9832 LHSOPT->stripObjCKindOfTypeAndQuals(*this),
9833 BlockReturnType);
9834 };
9835
9836 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
9837 return true;
9838
9839 if (LHSOPT->isObjCBuiltinType()) {
9840 return finish(RHSOPT->isObjCBuiltinType() ||
9841 RHSOPT->isObjCQualifiedIdType());
9842 }
9843
9844 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
9845 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
9846 // For block parameters, use the previous type checking for compatibility.
9847 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
9848 // Or corrected type checking as in non-compat mode.
9849 (!BlockReturnType &&
9850 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
9851 else
9852 return finish(ObjCQualifiedIdTypesAreCompatible(
9853 (BlockReturnType ? LHSOPT : RHSOPT),
9854 (BlockReturnType ? RHSOPT : LHSOPT), false));
9855 }
9856
9857 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
9858 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
9859 if (LHS && RHS) { // We have 2 user-defined types.
9860 if (LHS != RHS) {
9861 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
9862 return finish(BlockReturnType);
9863 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
9864 return finish(!BlockReturnType);
9865 }
9866 else
9867 return true;
9868 }
9869 return false;
9870 }
9871
9872 /// Comparison routine for Objective-C protocols to be used with
9873 /// llvm::array_pod_sort.
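/// Sorting by name gives the computed protocol lists a deterministic order;
/// e.g. (illustrative) the pair {NSCopying, NSCoding} always comes out as
/// NSCoding, NSCopying regardless of the order in which they were collected.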
9874 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9875 ObjCProtocolDecl * const *rhs) { 9876 return (*lhs)->getName().compare((*rhs)->getName()); 9877 } 9878 9879 /// getIntersectionOfProtocols - This routine finds the intersection of set 9880 /// of protocols inherited from two distinct objective-c pointer objects with 9881 /// the given common base. 9882 /// It is used to build composite qualifier list of the composite type of 9883 /// the conditional expression involving two objective-c pointer objects. 9884 static 9885 void getIntersectionOfProtocols(ASTContext &Context, 9886 const ObjCInterfaceDecl *CommonBase, 9887 const ObjCObjectPointerType *LHSOPT, 9888 const ObjCObjectPointerType *RHSOPT, 9889 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9890 9891 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9892 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9893 assert(LHS->getInterface() && "LHS must have an interface base"); 9894 assert(RHS->getInterface() && "RHS must have an interface base"); 9895 9896 // Add all of the protocols for the LHS. 9897 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9898 9899 // Start with the protocol qualifiers. 9900 for (auto *proto : LHS->quals()) { 9901 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9902 } 9903 9904 // Also add the protocols associated with the LHS interface. 9905 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9906 9907 // Add all of the protocols for the RHS. 9908 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9909 9910 // Start with the protocol qualifiers. 9911 for (auto *proto : RHS->quals()) { 9912 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9913 } 9914 9915 // Also add the protocols associated with the RHS interface. 9916 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9917 9918 // Compute the intersection of the collected protocol sets. 9919 for (auto *proto : LHSProtocolSet) { 9920 if (RHSProtocolSet.count(proto)) 9921 IntersectionSet.push_back(proto); 9922 } 9923 9924 // Compute the set of protocols that is implied by either the common type or 9925 // the protocols within the intersection. 9926 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9927 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9928 9929 // Remove any implied protocols from the list of inherited protocols. 9930 if (!ImpliedProtocols.empty()) { 9931 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9932 return ImpliedProtocols.contains(proto); 9933 }); 9934 } 9935 9936 // Sort the remaining protocols by name. 9937 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9938 compareObjCProtocolsByName); 9939 } 9940 9941 /// Determine whether the first type is a subtype of the second. 9942 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9943 QualType rhs) { 9944 // Common case: two object pointers. 9945 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9946 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9947 if (lhsOPT && rhsOPT) 9948 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9949 9950 // Two block pointers. 9951 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9952 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9953 if (lhsBlock && rhsBlock) 9954 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9955 9956 // If either is an unqualified 'id' and the other is a block, it's 9957 // acceptable. 
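// (Illustrative note: this is what allows a block literal to be stored into a
// plain 'id' variable, since blocks are themselves Objective-C objects.)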
9958 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9959 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9960 return true; 9961 9962 return false; 9963 } 9964 9965 // Check that the given Objective-C type argument lists are equivalent. 9966 static bool sameObjCTypeArgs(ASTContext &ctx, 9967 const ObjCInterfaceDecl *iface, 9968 ArrayRef<QualType> lhsArgs, 9969 ArrayRef<QualType> rhsArgs, 9970 bool stripKindOf) { 9971 if (lhsArgs.size() != rhsArgs.size()) 9972 return false; 9973 9974 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9975 if (!typeParams) 9976 return false; 9977 9978 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9979 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9980 continue; 9981 9982 switch (typeParams->begin()[i]->getVariance()) { 9983 case ObjCTypeParamVariance::Invariant: 9984 if (!stripKindOf || 9985 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9986 rhsArgs[i].stripObjCKindOfType(ctx))) { 9987 return false; 9988 } 9989 break; 9990 9991 case ObjCTypeParamVariance::Covariant: 9992 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9993 return false; 9994 break; 9995 9996 case ObjCTypeParamVariance::Contravariant: 9997 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9998 return false; 9999 break; 10000 } 10001 } 10002 10003 return true; 10004 } 10005 10006 QualType ASTContext::areCommonBaseCompatible( 10007 const ObjCObjectPointerType *Lptr, 10008 const ObjCObjectPointerType *Rptr) { 10009 const ObjCObjectType *LHS = Lptr->getObjectType(); 10010 const ObjCObjectType *RHS = Rptr->getObjectType(); 10011 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 10012 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 10013 10014 if (!LDecl || !RDecl) 10015 return {}; 10016 10017 // When either LHS or RHS is a kindof type, we should return a kindof type. 10018 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 10019 // kindof(A). 10020 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 10021 10022 // Follow the left-hand side up the class hierarchy until we either hit a 10023 // root or find the RHS. Record the ancestors in case we don't find it. 10024 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 10025 LHSAncestors; 10026 while (true) { 10027 // Record this ancestor. We'll need this if the common type isn't in the 10028 // path from the LHS to the root. 10029 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 10030 10031 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 10032 // Get the type arguments. 10033 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 10034 bool anyChanges = false; 10035 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10036 // Both have type arguments, compare them. 10037 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10038 LHS->getTypeArgs(), RHS->getTypeArgs(), 10039 /*stripKindOf=*/true)) 10040 return {}; 10041 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10042 // If only one has type arguments, the result will not have type 10043 // arguments. 10044 LHSTypeArgs = {}; 10045 anyChanges = true; 10046 } 10047 10048 // Compute the intersection of protocols. 10049 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10050 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 10051 Protocols); 10052 if (!Protocols.empty()) 10053 anyChanges = true; 10054 10055 // If anything in the LHS will have changed, build a new result type. 
10056 // If we need to return a kindof type but LHS is not a kindof type, we 10057 // build a new result type. 10058 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 10059 QualType Result = getObjCInterfaceType(LHS->getInterface()); 10060 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 10061 anyKindOf || LHS->isKindOfType()); 10062 return getObjCObjectPointerType(Result); 10063 } 10064 10065 return getObjCObjectPointerType(QualType(LHS, 0)); 10066 } 10067 10068 // Find the superclass. 10069 QualType LHSSuperType = LHS->getSuperClassType(); 10070 if (LHSSuperType.isNull()) 10071 break; 10072 10073 LHS = LHSSuperType->castAs<ObjCObjectType>(); 10074 } 10075 10076 // We didn't find anything by following the LHS to its root; now check 10077 // the RHS against the cached set of ancestors. 10078 while (true) { 10079 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 10080 if (KnownLHS != LHSAncestors.end()) { 10081 LHS = KnownLHS->second; 10082 10083 // Get the type arguments. 10084 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 10085 bool anyChanges = false; 10086 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10087 // Both have type arguments, compare them. 10088 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10089 LHS->getTypeArgs(), RHS->getTypeArgs(), 10090 /*stripKindOf=*/true)) 10091 return {}; 10092 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10093 // If only one has type arguments, the result will not have type 10094 // arguments. 10095 RHSTypeArgs = {}; 10096 anyChanges = true; 10097 } 10098 10099 // Compute the intersection of protocols. 10100 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10101 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 10102 Protocols); 10103 if (!Protocols.empty()) 10104 anyChanges = true; 10105 10106 // If we need to return a kindof type but RHS is not a kindof type, we 10107 // build a new result type. 10108 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 10109 QualType Result = getObjCInterfaceType(RHS->getInterface()); 10110 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 10111 anyKindOf || RHS->isKindOfType()); 10112 return getObjCObjectPointerType(Result); 10113 } 10114 10115 return getObjCObjectPointerType(QualType(RHS, 0)); 10116 } 10117 10118 // Find the superclass of the RHS. 10119 QualType RHSSuperType = RHS->getSuperClassType(); 10120 if (RHSSuperType.isNull()) 10121 break; 10122 10123 RHS = RHSSuperType->castAs<ObjCObjectType>(); 10124 } 10125 10126 return {}; 10127 } 10128 10129 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 10130 const ObjCObjectType *RHS) { 10131 assert(LHS->getInterface() && "LHS is not an interface type"); 10132 assert(RHS->getInterface() && "RHS is not an interface type"); 10133 10134 // Verify that the base decls are compatible: the RHS must be a subclass of 10135 // the LHS. 10136 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 10137 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 10138 if (!IsSuperClass) 10139 return false; 10140 10141 // If the LHS has protocol qualifiers, determine whether all of them are 10142 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 10143 // LHS). 10144 if (LHS->getNumProtocols() > 0) { 10145 // OK if conversion of LHS to SuperClass results in narrowing of types 10146 // ; i.e., SuperClass may implement at least one of the protocols 10147 // in LHS's protocol list. 
Example, SuperObj<P1> = lhs<P1,P2> is ok.
10148 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
10149 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
10150 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
10151 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's
10152 // qualifiers.
10153 for (auto *RHSPI : RHS->quals())
10154 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
10155 // If there are no protocols associated with RHS, it is not a match.
10156 if (SuperClassInheritedProtocols.empty())
10157 return false;
10158
10159 for (const auto *LHSProto : LHS->quals()) {
10160 bool SuperImplementsProtocol = false;
10161 for (auto *SuperClassProto : SuperClassInheritedProtocols)
10162 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
10163 SuperImplementsProtocol = true;
10164 break;
10165 }
10166 if (!SuperImplementsProtocol)
10167 return false;
10168 }
10169 }
10170
10171 // If the LHS is specialized, we may need to check type arguments.
10172 if (LHS->isSpecialized()) {
10173 // Follow the superclass chain until we've matched the LHS class in the
10174 // hierarchy. This substitutes type arguments through.
10175 const ObjCObjectType *RHSSuper = RHS;
10176 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
10177 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
10178
10179 // If the RHS is specialized, compare type arguments.
10180 if (RHSSuper->isSpecialized() &&
10181 !sameObjCTypeArgs(*this, LHS->getInterface(),
10182 LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
10183 /*stripKindOf=*/true)) {
10184 return false;
10185 }
10186 }
10187
10188 return true;
10189 }
10190
10191 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
10192 // Get the "pointed to" types.
10193 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
10194 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
10195
10196 if (!LHSOPT || !RHSOPT)
10197 return false;
10198
10199 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
10200 canAssignObjCInterfaces(RHSOPT, LHSOPT);
10201 }
10202
10203 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
10204 return canAssignObjCInterfaces(
10205 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
10206 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
10207 }
10208
10209 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
10210 /// both shall have the identically qualified version of a compatible type.
10211 /// C99 6.2.7p1: Two types have compatible types if their types are the
10212 /// same. See 6.7.[2,3,5] for additional rules.
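/// As an illustrative C example, 'int (*)[]' and 'int (*)[10]' are compatible
/// because an array of unknown bound is compatible with any definite-sized
/// array of a compatible element type, while 'int' and 'const int' are not
/// compatible, since their qualifiers differ.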
10213 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 10214 bool CompareUnqualified) { 10215 if (getLangOpts().CPlusPlus) 10216 return hasSameType(LHS, RHS); 10217 10218 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 10219 } 10220 10221 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 10222 return typesAreCompatible(LHS, RHS); 10223 } 10224 10225 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 10226 return !mergeTypes(LHS, RHS, true).isNull(); 10227 } 10228 10229 /// mergeTransparentUnionType - if T is a transparent union type and a member 10230 /// of T is compatible with SubType, return the merged type, else return 10231 /// QualType() 10232 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 10233 bool OfBlockPointer, 10234 bool Unqualified) { 10235 if (const RecordType *UT = T->getAsUnionType()) { 10236 RecordDecl *UD = UT->getDecl(); 10237 if (UD->hasAttr<TransparentUnionAttr>()) { 10238 for (const auto *I : UD->fields()) { 10239 QualType ET = I->getType().getUnqualifiedType(); 10240 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 10241 if (!MT.isNull()) 10242 return MT; 10243 } 10244 } 10245 } 10246 10247 return {}; 10248 } 10249 10250 /// mergeFunctionParameterTypes - merge two types which appear as function 10251 /// parameter types 10252 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 10253 bool OfBlockPointer, 10254 bool Unqualified) { 10255 // GNU extension: two types are compatible if they appear as a function 10256 // argument, one of the types is a transparent union type and the other 10257 // type is compatible with a union member 10258 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 10259 Unqualified); 10260 if (!lmerge.isNull()) 10261 return lmerge; 10262 10263 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10264 Unqualified); 10265 if (!rmerge.isNull()) 10266 return rmerge; 10267 10268 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10269 } 10270 10271 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10272 bool OfBlockPointer, bool Unqualified, 10273 bool AllowCXX, 10274 bool IsConditionalOperator) { 10275 const auto *lbase = lhs->castAs<FunctionType>(); 10276 const auto *rbase = rhs->castAs<FunctionType>(); 10277 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10278 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10279 bool allLTypes = true; 10280 bool allRTypes = true; 10281 10282 // Check return type 10283 QualType retType; 10284 if (OfBlockPointer) { 10285 QualType RHS = rbase->getReturnType(); 10286 QualType LHS = lbase->getReturnType(); 10287 bool UnqualifiedResult = Unqualified; 10288 if (!UnqualifiedResult) 10289 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10290 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10291 } 10292 else 10293 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10294 Unqualified); 10295 if (retType.isNull()) 10296 return {}; 10297 10298 if (Unqualified) 10299 retType = retType.getUnqualifiedType(); 10300 10301 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10302 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10303 if (Unqualified) { 10304 LRetType = LRetType.getUnqualifiedType(); 10305 RRetType = RRetType.getUnqualifiedType(); 10306 } 10307 10308 if (getCanonicalType(retType) != LRetType) 
10309 allLTypes = false; 10310 if (getCanonicalType(retType) != RRetType) 10311 allRTypes = false; 10312 10313 // FIXME: double check this 10314 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10315 // rbase->getRegParmAttr() != 0 && 10316 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10317 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10318 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10319 10320 // Compatible functions must have compatible calling conventions 10321 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10322 return {}; 10323 10324 // Regparm is part of the calling convention. 10325 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10326 return {}; 10327 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10328 return {}; 10329 10330 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10331 return {}; 10332 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10333 return {}; 10334 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10335 return {}; 10336 10337 // When merging declarations, it's common for supplemental information like 10338 // attributes to only be present in one of the declarations, and we generally 10339 // want type merging to preserve the union of information. So a merged 10340 // function type should be noreturn if it was noreturn in *either* operand 10341 // type. 10342 // 10343 // But for the conditional operator, this is backwards. The result of the 10344 // operator could be either operand, and its type should conservatively 10345 // reflect that. So a function type in a composite type is noreturn only 10346 // if it's noreturn in *both* operand types. 10347 // 10348 // Arguably, noreturn is a kind of subtype, and the conditional operator 10349 // ought to produce the most specific common supertype of its operand types. 10350 // That would differ from this rule in contravariant positions. However, 10351 // neither C nor C++ generally uses this kind of subtype reasoning. Also, 10352 // as a practical matter, it would only affect C code that does abstraction of 10353 // higher-order functions (taking noreturn callbacks!), which is uncommon to 10354 // say the least. So we use the simpler rule. 10355 bool NoReturn = IsConditionalOperator 10356 ? 
lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn() 10357 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10358 if (lbaseInfo.getNoReturn() != NoReturn) 10359 allLTypes = false; 10360 if (rbaseInfo.getNoReturn() != NoReturn) 10361 allRTypes = false; 10362 10363 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10364 10365 if (lproto && rproto) { // two C99 style function prototypes 10366 assert((AllowCXX || 10367 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10368 "C++ shouldn't be here"); 10369 // Compatible functions must have the same number of parameters 10370 if (lproto->getNumParams() != rproto->getNumParams()) 10371 return {}; 10372 10373 // Variadic and non-variadic functions aren't compatible 10374 if (lproto->isVariadic() != rproto->isVariadic()) 10375 return {}; 10376 10377 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10378 return {}; 10379 10380 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10381 bool canUseLeft, canUseRight; 10382 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10383 newParamInfos)) 10384 return {}; 10385 10386 if (!canUseLeft) 10387 allLTypes = false; 10388 if (!canUseRight) 10389 allRTypes = false; 10390 10391 // Check parameter type compatibility 10392 SmallVector<QualType, 10> types; 10393 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10394 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10395 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10396 QualType paramType = mergeFunctionParameterTypes( 10397 lParamType, rParamType, OfBlockPointer, Unqualified); 10398 if (paramType.isNull()) 10399 return {}; 10400 10401 if (Unqualified) 10402 paramType = paramType.getUnqualifiedType(); 10403 10404 types.push_back(paramType); 10405 if (Unqualified) { 10406 lParamType = lParamType.getUnqualifiedType(); 10407 rParamType = rParamType.getUnqualifiedType(); 10408 } 10409 10410 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10411 allLTypes = false; 10412 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10413 allRTypes = false; 10414 } 10415 10416 if (allLTypes) return lhs; 10417 if (allRTypes) return rhs; 10418 10419 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10420 EPI.ExtInfo = einfo; 10421 EPI.ExtParameterInfos = 10422 newParamInfos.empty() ? nullptr : newParamInfos.data(); 10423 return getFunctionType(retType, types, EPI); 10424 } 10425 10426 if (lproto) allRTypes = false; 10427 if (rproto) allLTypes = false; 10428 10429 const FunctionProtoType *proto = lproto ? lproto : rproto; 10430 if (proto) { 10431 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10432 if (proto->isVariadic()) 10433 return {}; 10434 // Check that the types are compatible with the types that 10435 // would result from default argument promotions (C99 6.7.5.3p15). 10436 // The only types actually affected are promotable integer 10437 // types and floats, which would be passed as a different 10438 // type depending on whether the prototype is visible. 10439 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10440 QualType paramTy = proto->getParamType(i); 10441 10442 // Look at the converted type of enum types, since that is the type used 10443 // to pass enum values. 
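// (Illustrative note: an enum whose underlying integer type is 'unsigned int'
// is passed as 'unsigned int', so that is the type the promotion check below
// has to consider.)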
10444 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10445 paramTy = Enum->getDecl()->getIntegerType(); 10446 if (paramTy.isNull()) 10447 return {}; 10448 } 10449 10450 if (isPromotableIntegerType(paramTy) || 10451 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10452 return {}; 10453 } 10454 10455 if (allLTypes) return lhs; 10456 if (allRTypes) return rhs; 10457 10458 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10459 EPI.ExtInfo = einfo; 10460 return getFunctionType(retType, proto->getParamTypes(), EPI); 10461 } 10462 10463 if (allLTypes) return lhs; 10464 if (allRTypes) return rhs; 10465 return getFunctionNoProtoType(retType, einfo); 10466 } 10467 10468 /// Given that we have an enum type and a non-enum type, try to merge them. 10469 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10470 QualType other, bool isBlockReturnType) { 10471 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10472 // a signed integer type, or an unsigned integer type. 10473 // Compatibility is based on the underlying type, not the promotion 10474 // type. 10475 QualType underlyingType = ET->getDecl()->getIntegerType(); 10476 if (underlyingType.isNull()) 10477 return {}; 10478 if (Context.hasSameType(underlyingType, other)) 10479 return other; 10480 10481 // In block return types, we're more permissive and accept any 10482 // integral type of the same size. 10483 if (isBlockReturnType && other->isIntegerType() && 10484 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10485 return other; 10486 10487 return {}; 10488 } 10489 10490 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, 10491 bool Unqualified, bool BlockReturnType, 10492 bool IsConditionalOperator) { 10493 // For C++ we will not reach this code with reference types (see below), 10494 // for OpenMP variant call overloading we might. 10495 // 10496 // C++ [expr]: If an expression initially has the type "reference to T", the 10497 // type is adjusted to "T" prior to any further analysis, the expression 10498 // designates the object or function denoted by the reference, and the 10499 // expression is an lvalue unless the reference is an rvalue reference and 10500 // the expression is a function call (possibly inside parentheses). 10501 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10502 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10503 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10504 LHS->getTypeClass() == RHS->getTypeClass()) 10505 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10506 OfBlockPointer, Unqualified, BlockReturnType); 10507 if (LHSRefTy || RHSRefTy) 10508 return {}; 10509 10510 if (Unqualified) { 10511 LHS = LHS.getUnqualifiedType(); 10512 RHS = RHS.getUnqualifiedType(); 10513 } 10514 10515 QualType LHSCan = getCanonicalType(LHS), 10516 RHSCan = getCanonicalType(RHS); 10517 10518 // If two types are identical, they are compatible. 10519 if (LHSCan == RHSCan) 10520 return LHS; 10521 10522 // If the qualifiers are different, the types aren't compatible... mostly. 10523 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10524 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10525 if (LQuals != RQuals) { 10526 // If any of these qualifiers are different, we have a type 10527 // mismatch. 
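// For example (illustrative), 'const int' does not merge with plain 'int',
// and pointee types living in different address spaces fail here when their
// pointer types are merged recursively.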
10528 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10529 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10530 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10531 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10532 return {}; 10533 10534 // Exactly one GC qualifier difference is allowed: __strong is 10535 // okay if the other type has no GC qualifier but is an Objective 10536 // C object pointer (i.e. implicitly strong by default). We fix 10537 // this by pretending that the unqualified type was actually 10538 // qualified __strong. 10539 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10540 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10541 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10542 10543 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10544 return {}; 10545 10546 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10547 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10548 } 10549 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10550 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10551 } 10552 return {}; 10553 } 10554 10555 // Okay, qualifiers are equal. 10556 10557 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10558 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10559 10560 // We want to consider the two function types to be the same for these 10561 // comparisons, just force one to the other. 10562 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10563 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10564 10565 // Same as above for arrays 10566 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10567 LHSClass = Type::ConstantArray; 10568 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10569 RHSClass = Type::ConstantArray; 10570 10571 // ObjCInterfaces are just specialized ObjCObjects. 10572 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10573 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10574 10575 // Canonicalize ExtVector -> Vector. 10576 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10577 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10578 10579 // If the canonical type classes don't match. 10580 if (LHSClass != RHSClass) { 10581 // Note that we only have special rules for turning block enum 10582 // returns into block int returns, not vice-versa. 10583 if (const auto *ETy = LHS->getAs<EnumType>()) { 10584 return mergeEnumWithInteger(*this, ETy, RHS, false); 10585 } 10586 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10587 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10588 } 10589 // allow block pointer type to match an 'id' type. 10590 if (OfBlockPointer && !BlockReturnType) { 10591 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10592 return LHS; 10593 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10594 return RHS; 10595 } 10596 // Allow __auto_type to match anything; it merges to the type with more 10597 // information. 10598 if (const auto *AT = LHS->getAs<AutoType>()) { 10599 if (!AT->isDeduced() && AT->isGNUAutoType()) 10600 return RHS; 10601 } 10602 if (const auto *AT = RHS->getAs<AutoType>()) { 10603 if (!AT->isDeduced() && AT->isGNUAutoType()) 10604 return LHS; 10605 } 10606 return {}; 10607 } 10608 10609 // The canonical type classes match. 
10610 switch (LHSClass) { 10611 #define TYPE(Class, Base) 10612 #define ABSTRACT_TYPE(Class, Base) 10613 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10614 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10615 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10616 #include "clang/AST/TypeNodes.inc" 10617 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10618 10619 case Type::Auto: 10620 case Type::DeducedTemplateSpecialization: 10621 case Type::LValueReference: 10622 case Type::RValueReference: 10623 case Type::MemberPointer: 10624 llvm_unreachable("C++ should never be in mergeTypes"); 10625 10626 case Type::ObjCInterface: 10627 case Type::IncompleteArray: 10628 case Type::VariableArray: 10629 case Type::FunctionProto: 10630 case Type::ExtVector: 10631 llvm_unreachable("Types are eliminated above"); 10632 10633 case Type::Pointer: 10634 { 10635 // Merge two pointer types, while trying to preserve typedef info 10636 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10637 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10638 if (Unqualified) { 10639 LHSPointee = LHSPointee.getUnqualifiedType(); 10640 RHSPointee = RHSPointee.getUnqualifiedType(); 10641 } 10642 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10643 Unqualified); 10644 if (ResultType.isNull()) 10645 return {}; 10646 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10647 return LHS; 10648 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10649 return RHS; 10650 return getPointerType(ResultType); 10651 } 10652 case Type::BlockPointer: 10653 { 10654 // Merge two block pointer types, while trying to preserve typedef info 10655 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10656 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10657 if (Unqualified) { 10658 LHSPointee = LHSPointee.getUnqualifiedType(); 10659 RHSPointee = RHSPointee.getUnqualifiedType(); 10660 } 10661 if (getLangOpts().OpenCL) { 10662 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10663 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10664 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10665 // 6.12.5) thus the following check is asymmetric. 
10666 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10667 return {}; 10668 LHSPteeQual.removeAddressSpace(); 10669 RHSPteeQual.removeAddressSpace(); 10670 LHSPointee = 10671 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10672 RHSPointee = 10673 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10674 } 10675 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10676 Unqualified); 10677 if (ResultType.isNull()) 10678 return {}; 10679 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10680 return LHS; 10681 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10682 return RHS; 10683 return getBlockPointerType(ResultType); 10684 } 10685 case Type::Atomic: 10686 { 10687 // Merge two pointer types, while trying to preserve typedef info 10688 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10689 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10690 if (Unqualified) { 10691 LHSValue = LHSValue.getUnqualifiedType(); 10692 RHSValue = RHSValue.getUnqualifiedType(); 10693 } 10694 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10695 Unqualified); 10696 if (ResultType.isNull()) 10697 return {}; 10698 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10699 return LHS; 10700 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10701 return RHS; 10702 return getAtomicType(ResultType); 10703 } 10704 case Type::ConstantArray: 10705 { 10706 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10707 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10708 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10709 return {}; 10710 10711 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10712 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10713 if (Unqualified) { 10714 LHSElem = LHSElem.getUnqualifiedType(); 10715 RHSElem = RHSElem.getUnqualifiedType(); 10716 } 10717 10718 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10719 if (ResultType.isNull()) 10720 return {}; 10721 10722 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10723 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10724 10725 // If either side is a variable array, and both are complete, check whether 10726 // the current dimension is definite. 
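// (Illustrative note: if one side's size expression folds to the constant 10
// and the other side is 'int[20]', the merge fails; a VLA whose size is not
// an integer constant expression places no constraint here.)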
10727 if (LVAT || RVAT) { 10728 auto SizeFetch = [this](const VariableArrayType* VAT, 10729 const ConstantArrayType* CAT) 10730 -> std::pair<bool,llvm::APInt> { 10731 if (VAT) { 10732 std::optional<llvm::APSInt> TheInt; 10733 Expr *E = VAT->getSizeExpr(); 10734 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10735 return std::make_pair(true, *TheInt); 10736 return std::make_pair(false, llvm::APSInt()); 10737 } 10738 if (CAT) 10739 return std::make_pair(true, CAT->getSize()); 10740 return std::make_pair(false, llvm::APInt()); 10741 }; 10742 10743 bool HaveLSize, HaveRSize; 10744 llvm::APInt LSize, RSize; 10745 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10746 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10747 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10748 return {}; // Definite, but unequal, array dimension 10749 } 10750 10751 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10752 return LHS; 10753 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10754 return RHS; 10755 if (LCAT) 10756 return getConstantArrayType(ResultType, LCAT->getSize(), 10757 LCAT->getSizeExpr(), ArraySizeModifier(), 0); 10758 if (RCAT) 10759 return getConstantArrayType(ResultType, RCAT->getSize(), 10760 RCAT->getSizeExpr(), ArraySizeModifier(), 0); 10761 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10762 return LHS; 10763 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10764 return RHS; 10765 if (LVAT) { 10766 // FIXME: This isn't correct! But tricky to implement because 10767 // the array's size has to be the size of LHS, but the type 10768 // has to be different. 10769 return LHS; 10770 } 10771 if (RVAT) { 10772 // FIXME: This isn't correct! But tricky to implement because 10773 // the array's size has to be the size of RHS, but the type 10774 // has to be different. 10775 return RHS; 10776 } 10777 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10778 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10779 return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0); 10780 } 10781 case Type::FunctionNoProto: 10782 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 10783 /*AllowCXX=*/false, IsConditionalOperator); 10784 case Type::Record: 10785 case Type::Enum: 10786 return {}; 10787 case Type::Builtin: 10788 // Only exactly equal builtin types are compatible, which is tested above. 10789 return {}; 10790 case Type::Complex: 10791 // Distinct complex types are incompatible. 10792 return {}; 10793 case Type::Vector: 10794 // FIXME: The merged type should be an ExtVector! 10795 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10796 RHSCan->castAs<VectorType>())) 10797 return LHS; 10798 return {}; 10799 case Type::ConstantMatrix: 10800 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10801 RHSCan->castAs<ConstantMatrixType>())) 10802 return LHS; 10803 return {}; 10804 case Type::ObjCObject: { 10805 // Check if the types are assignment compatible. 10806 // FIXME: This should be type compatibility, e.g. whether 10807 // "LHS x; RHS x;" at global scope is legal. 
10808 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10809 RHS->castAs<ObjCObjectType>())) 10810 return LHS; 10811 return {}; 10812 } 10813 case Type::ObjCObjectPointer: 10814 if (OfBlockPointer) { 10815 if (canAssignObjCInterfacesInBlockPointer( 10816 LHS->castAs<ObjCObjectPointerType>(), 10817 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10818 return LHS; 10819 return {}; 10820 } 10821 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10822 RHS->castAs<ObjCObjectPointerType>())) 10823 return LHS; 10824 return {}; 10825 case Type::Pipe: 10826 assert(LHS != RHS && 10827 "Equivalent pipe types should have already been handled!"); 10828 return {}; 10829 case Type::BitInt: { 10830 // Merge two bit-precise int types, while trying to preserve typedef info. 10831 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10832 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10833 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10834 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10835 10836 // Like unsigned/int, shouldn't have a type if they don't match. 10837 if (LHSUnsigned != RHSUnsigned) 10838 return {}; 10839 10840 if (LHSBits != RHSBits) 10841 return {}; 10842 return LHS; 10843 } 10844 } 10845 10846 llvm_unreachable("Invalid Type::Class!"); 10847 } 10848 10849 bool ASTContext::mergeExtParameterInfo( 10850 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10851 bool &CanUseFirst, bool &CanUseSecond, 10852 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10853 assert(NewParamInfos.empty() && "param info list not empty"); 10854 CanUseFirst = CanUseSecond = true; 10855 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10856 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10857 10858 // Fast path: if the first type doesn't have ext parameter infos, 10859 // we match if and only if the second type also doesn't have them. 10860 if (!FirstHasInfo && !SecondHasInfo) 10861 return true; 10862 10863 bool NeedParamInfo = false; 10864 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10865 : SecondFnType->getExtParameterInfos().size(); 10866 10867 for (size_t I = 0; I < E; ++I) { 10868 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10869 if (FirstHasInfo) 10870 FirstParam = FirstFnType->getExtParameterInfo(I); 10871 if (SecondHasInfo) 10872 SecondParam = SecondFnType->getExtParameterInfo(I); 10873 10874 // Cannot merge unless everything except the noescape flag matches. 10875 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10876 return false; 10877 10878 bool FirstNoEscape = FirstParam.isNoEscape(); 10879 bool SecondNoEscape = SecondParam.isNoEscape(); 10880 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10881 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10882 if (NewParamInfos.back().getOpaqueValue()) 10883 NeedParamInfo = true; 10884 if (FirstNoEscape != IsNoEscape) 10885 CanUseFirst = false; 10886 if (SecondNoEscape != IsNoEscape) 10887 CanUseSecond = false; 10888 } 10889 10890 if (!NeedParamInfo) 10891 NewParamInfos.clear(); 10892 10893 return true; 10894 } 10895 10896 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10897 ObjCLayouts[CD] = nullptr; 10898 } 10899 10900 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10901 /// 'RHS' attributes and returns the merged version; including for function 10902 /// return types. 
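/// For instance, under Objective-C garbage collection an unqualified object
/// pointer is implicitly __strong, so 'id' and '__strong id' merge (the
/// __strong-qualified side is returned), whereas any mismatch involving
/// __weak yields a null QualType.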
10903 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10904 QualType LHSCan = getCanonicalType(LHS), 10905 RHSCan = getCanonicalType(RHS); 10906 // If two types are identical, they are compatible. 10907 if (LHSCan == RHSCan) 10908 return LHS; 10909 if (RHSCan->isFunctionType()) { 10910 if (!LHSCan->isFunctionType()) 10911 return {}; 10912 QualType OldReturnType = 10913 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10914 QualType NewReturnType = 10915 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10916 QualType ResReturnType = 10917 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10918 if (ResReturnType.isNull()) 10919 return {}; 10920 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10921 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10922 // In either case, use OldReturnType to build the new function type. 10923 const auto *F = LHS->castAs<FunctionType>(); 10924 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10925 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10926 EPI.ExtInfo = getFunctionExtInfo(LHS); 10927 QualType ResultType = 10928 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10929 return ResultType; 10930 } 10931 } 10932 return {}; 10933 } 10934 10935 // If the qualifiers are different, the types can still be merged. 10936 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10937 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10938 if (LQuals != RQuals) { 10939 // If any of these qualifiers are different, we have a type mismatch. 10940 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10941 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10942 return {}; 10943 10944 // Exactly one GC qualifier difference is allowed: __strong is 10945 // okay if the other type has no GC qualifier but is an Objective 10946 // C object pointer (i.e. implicitly strong by default). We fix 10947 // this by pretending that the unqualified type was actually 10948 // qualified __strong. 
10949 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10950 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10951 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10952 10953 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10954 return {}; 10955 10956 if (GC_L == Qualifiers::Strong) 10957 return LHS; 10958 if (GC_R == Qualifiers::Strong) 10959 return RHS; 10960 return {}; 10961 } 10962 10963 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10964 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10965 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10966 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10967 if (ResQT == LHSBaseQT) 10968 return LHS; 10969 if (ResQT == RHSBaseQT) 10970 return RHS; 10971 } 10972 return {}; 10973 } 10974 10975 //===----------------------------------------------------------------------===// 10976 // Integer Predicates 10977 //===----------------------------------------------------------------------===// 10978 10979 unsigned ASTContext::getIntWidth(QualType T) const { 10980 if (const auto *ET = T->getAs<EnumType>()) 10981 T = ET->getDecl()->getIntegerType(); 10982 if (T->isBooleanType()) 10983 return 1; 10984 if (const auto *EIT = T->getAs<BitIntType>()) 10985 return EIT->getNumBits(); 10986 // For builtin types, just use the standard type sizing method 10987 return (unsigned)getTypeSize(T); 10988 } 10989 10990 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 10991 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 10992 T->isFixedPointType()) && 10993 "Unexpected type"); 10994 10995 // Turn <4 x signed int> -> <4 x unsigned int> 10996 if (const auto *VTy = T->getAs<VectorType>()) 10997 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 10998 VTy->getNumElements(), VTy->getVectorKind()); 10999 11000 // For _BitInt, return an unsigned _BitInt with same width. 11001 if (const auto *EITy = T->getAs<BitIntType>()) 11002 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 11003 11004 // For enums, get the underlying integer type of the enum, and let the general 11005 // integer type signchanging code handle it. 11006 if (const auto *ETy = T->getAs<EnumType>()) 11007 T = ETy->getDecl()->getIntegerType(); 11008 11009 switch (T->castAs<BuiltinType>()->getKind()) { 11010 case BuiltinType::Char_U: 11011 // Plain `char` is mapped to `unsigned char` even if it's already unsigned 11012 case BuiltinType::Char_S: 11013 case BuiltinType::SChar: 11014 case BuiltinType::Char8: 11015 return UnsignedCharTy; 11016 case BuiltinType::Short: 11017 return UnsignedShortTy; 11018 case BuiltinType::Int: 11019 return UnsignedIntTy; 11020 case BuiltinType::Long: 11021 return UnsignedLongTy; 11022 case BuiltinType::LongLong: 11023 return UnsignedLongLongTy; 11024 case BuiltinType::Int128: 11025 return UnsignedInt128Ty; 11026 // wchar_t is special. It is either signed or not, but when it's signed, 11027 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 11028 // version of its underlying type instead. 
11029 case BuiltinType::WChar_S: 11030 return getUnsignedWCharType(); 11031 11032 case BuiltinType::ShortAccum: 11033 return UnsignedShortAccumTy; 11034 case BuiltinType::Accum: 11035 return UnsignedAccumTy; 11036 case BuiltinType::LongAccum: 11037 return UnsignedLongAccumTy; 11038 case BuiltinType::SatShortAccum: 11039 return SatUnsignedShortAccumTy; 11040 case BuiltinType::SatAccum: 11041 return SatUnsignedAccumTy; 11042 case BuiltinType::SatLongAccum: 11043 return SatUnsignedLongAccumTy; 11044 case BuiltinType::ShortFract: 11045 return UnsignedShortFractTy; 11046 case BuiltinType::Fract: 11047 return UnsignedFractTy; 11048 case BuiltinType::LongFract: 11049 return UnsignedLongFractTy; 11050 case BuiltinType::SatShortFract: 11051 return SatUnsignedShortFractTy; 11052 case BuiltinType::SatFract: 11053 return SatUnsignedFractTy; 11054 case BuiltinType::SatLongFract: 11055 return SatUnsignedLongFractTy; 11056 default: 11057 assert((T->hasUnsignedIntegerRepresentation() || 11058 T->isUnsignedFixedPointType()) && 11059 "Unexpected signed integer or fixed point type"); 11060 return T; 11061 } 11062 } 11063 11064 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 11065 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11066 T->isFixedPointType()) && 11067 "Unexpected type"); 11068 11069 // Turn <4 x unsigned int> -> <4 x signed int> 11070 if (const auto *VTy = T->getAs<VectorType>()) 11071 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 11072 VTy->getNumElements(), VTy->getVectorKind()); 11073 11074 // For _BitInt, return a signed _BitInt with same width. 11075 if (const auto *EITy = T->getAs<BitIntType>()) 11076 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 11077 11078 // For enums, get the underlying integer type of the enum, and let the general 11079 // integer type signchanging code handle it. 11080 if (const auto *ETy = T->getAs<EnumType>()) 11081 T = ETy->getDecl()->getIntegerType(); 11082 11083 switch (T->castAs<BuiltinType>()->getKind()) { 11084 case BuiltinType::Char_S: 11085 // Plain `char` is mapped to `signed char` even if it's already signed 11086 case BuiltinType::Char_U: 11087 case BuiltinType::UChar: 11088 case BuiltinType::Char8: 11089 return SignedCharTy; 11090 case BuiltinType::UShort: 11091 return ShortTy; 11092 case BuiltinType::UInt: 11093 return IntTy; 11094 case BuiltinType::ULong: 11095 return LongTy; 11096 case BuiltinType::ULongLong: 11097 return LongLongTy; 11098 case BuiltinType::UInt128: 11099 return Int128Ty; 11100 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 11101 // there's no matching "signed wchar_t". Therefore we return the signed 11102 // version of its underlying type instead. 
11103 case BuiltinType::WChar_U: 11104 return getSignedWCharType(); 11105 11106 case BuiltinType::UShortAccum: 11107 return ShortAccumTy; 11108 case BuiltinType::UAccum: 11109 return AccumTy; 11110 case BuiltinType::ULongAccum: 11111 return LongAccumTy; 11112 case BuiltinType::SatUShortAccum: 11113 return SatShortAccumTy; 11114 case BuiltinType::SatUAccum: 11115 return SatAccumTy; 11116 case BuiltinType::SatULongAccum: 11117 return SatLongAccumTy; 11118 case BuiltinType::UShortFract: 11119 return ShortFractTy; 11120 case BuiltinType::UFract: 11121 return FractTy; 11122 case BuiltinType::ULongFract: 11123 return LongFractTy; 11124 case BuiltinType::SatUShortFract: 11125 return SatShortFractTy; 11126 case BuiltinType::SatUFract: 11127 return SatFractTy; 11128 case BuiltinType::SatULongFract: 11129 return SatLongFractTy; 11130 default: 11131 assert( 11132 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 11133 "Unexpected signed integer or fixed point type"); 11134 return T; 11135 } 11136 } 11137 11138 ASTMutationListener::~ASTMutationListener() = default; 11139 11140 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 11141 QualType ReturnType) {} 11142 11143 //===----------------------------------------------------------------------===// 11144 // Builtin Type Computation 11145 //===----------------------------------------------------------------------===// 11146 11147 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 11148 /// pointer over the consumed characters. This returns the resultant type. If 11149 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 11150 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 11151 /// a vector of "i*". 11152 /// 11153 /// RequiresICE is filled in on return to indicate whether the value is required 11154 /// to be an Integer Constant Expression. 11155 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 11156 ASTContext::GetBuiltinTypeError &Error, 11157 bool &RequiresICE, 11158 bool AllowTypeModifiers) { 11159 // Modifiers. 11160 int HowLong = 0; 11161 bool Signed = false, Unsigned = false; 11162 RequiresICE = false; 11163 11164 // Read the prefixed modifiers first. 11165 bool Done = false; 11166 #ifndef NDEBUG 11167 bool IsSpecial = false; 11168 #endif 11169 while (!Done) { 11170 switch (*Str++) { 11171 default: Done = true; --Str; break; 11172 case 'I': 11173 RequiresICE = true; 11174 break; 11175 case 'S': 11176 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 11177 assert(!Signed && "Can't use 'S' modifier multiple times!"); 11178 Signed = true; 11179 break; 11180 case 'U': 11181 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 11182 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 11183 Unsigned = true; 11184 break; 11185 case 'L': 11186 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 11187 assert(HowLong <= 2 && "Can't have LLLL modifier"); 11188 ++HowLong; 11189 break; 11190 case 'N': 11191 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 11192 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11193 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 11194 #ifndef NDEBUG 11195 IsSpecial = true; 11196 #endif 11197 if (Context.getTargetInfo().getLongWidth() == 32) 11198 ++HowLong; 11199 break; 11200 case 'W': 11201 // This modifier represents int64 type. 
11202 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11203 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 11204 #ifndef NDEBUG 11205 IsSpecial = true; 11206 #endif 11207 switch (Context.getTargetInfo().getInt64Type()) { 11208 default: 11209 llvm_unreachable("Unexpected integer type"); 11210 case TargetInfo::SignedLong: 11211 HowLong = 1; 11212 break; 11213 case TargetInfo::SignedLongLong: 11214 HowLong = 2; 11215 break; 11216 } 11217 break; 11218 case 'Z': 11219 // This modifier represents int32 type. 11220 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11221 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 11222 #ifndef NDEBUG 11223 IsSpecial = true; 11224 #endif 11225 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 11226 default: 11227 llvm_unreachable("Unexpected integer type"); 11228 case TargetInfo::SignedInt: 11229 HowLong = 0; 11230 break; 11231 case TargetInfo::SignedLong: 11232 HowLong = 1; 11233 break; 11234 case TargetInfo::SignedLongLong: 11235 HowLong = 2; 11236 break; 11237 } 11238 break; 11239 case 'O': 11240 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11241 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 11242 #ifndef NDEBUG 11243 IsSpecial = true; 11244 #endif 11245 if (Context.getLangOpts().OpenCL) 11246 HowLong = 1; 11247 else 11248 HowLong = 2; 11249 break; 11250 } 11251 } 11252 11253 QualType Type; 11254 11255 // Read the base type. 11256 switch (*Str++) { 11257 default: llvm_unreachable("Unknown builtin type letter!"); 11258 case 'x': 11259 assert(HowLong == 0 && !Signed && !Unsigned && 11260 "Bad modifiers used with 'x'!"); 11261 Type = Context.Float16Ty; 11262 break; 11263 case 'y': 11264 assert(HowLong == 0 && !Signed && !Unsigned && 11265 "Bad modifiers used with 'y'!"); 11266 Type = Context.BFloat16Ty; 11267 break; 11268 case 'v': 11269 assert(HowLong == 0 && !Signed && !Unsigned && 11270 "Bad modifiers used with 'v'!"); 11271 Type = Context.VoidTy; 11272 break; 11273 case 'h': 11274 assert(HowLong == 0 && !Signed && !Unsigned && 11275 "Bad modifiers used with 'h'!"); 11276 Type = Context.HalfTy; 11277 break; 11278 case 'f': 11279 assert(HowLong == 0 && !Signed && !Unsigned && 11280 "Bad modifiers used with 'f'!"); 11281 Type = Context.FloatTy; 11282 break; 11283 case 'd': 11284 assert(HowLong < 3 && !Signed && !Unsigned && 11285 "Bad modifiers used with 'd'!"); 11286 if (HowLong == 1) 11287 Type = Context.LongDoubleTy; 11288 else if (HowLong == 2) 11289 Type = Context.Float128Ty; 11290 else 11291 Type = Context.DoubleTy; 11292 break; 11293 case 's': 11294 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11295 if (Unsigned) 11296 Type = Context.UnsignedShortTy; 11297 else 11298 Type = Context.ShortTy; 11299 break; 11300 case 'i': 11301 if (HowLong == 3) 11302 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11303 else if (HowLong == 2) 11304 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11305 else if (HowLong == 1) 11306 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11307 else 11308 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11309 break; 11310 case 'c': 11311 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11312 if (Signed) 11313 Type = Context.SignedCharTy; 11314 else if (Unsigned) 11315 Type = Context.UnsignedCharTy; 11316 else 11317 Type = Context.CharTy; 11318 break; 11319 case 'b': // boolean 11320 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11321 Type = Context.BoolTy; 11322 break; 11323 case 'z': // size_t. 11324 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11325 Type = Context.getSizeType(); 11326 break; 11327 case 'w': // wchar_t. 11328 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11329 Type = Context.getWideCharType(); 11330 break; 11331 case 'F': 11332 Type = Context.getCFConstantStringType(); 11333 break; 11334 case 'G': 11335 Type = Context.getObjCIdType(); 11336 break; 11337 case 'H': 11338 Type = Context.getObjCSelType(); 11339 break; 11340 case 'M': 11341 Type = Context.getObjCSuperType(); 11342 break; 11343 case 'a': 11344 Type = Context.getBuiltinVaListType(); 11345 assert(!Type.isNull() && "builtin va list type not initialized!"); 11346 break; 11347 case 'A': 11348 // This is a "reference" to a va_list; however, what exactly 11349 // this means depends on how va_list is defined. There are two 11350 // different kinds of va_list: ones passed by value, and ones 11351 // passed by reference. An example of a by-value va_list is 11352 // x86, where va_list is a char*. An example of by-ref va_list 11353 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11354 // we want this argument to be a char*&; for x86-64, we want 11355 // it to be a __va_list_tag*. 11356 Type = Context.getBuiltinVaListType(); 11357 assert(!Type.isNull() && "builtin va list type not initialized!"); 11358 if (Type->isArrayType()) 11359 Type = Context.getArrayDecayedType(Type); 11360 else 11361 Type = Context.getLValueReferenceType(Type); 11362 break; 11363 case 'q': { 11364 char *End; 11365 unsigned NumElements = strtoul(Str, &End, 10); 11366 assert(End != Str && "Missing vector size"); 11367 Str = End; 11368 11369 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11370 RequiresICE, false); 11371 assert(!RequiresICE && "Can't require vector ICE"); 11372 11373 Type = Context.getScalableVectorType(ElementType, NumElements); 11374 break; 11375 } 11376 case 'Q': { 11377 switch (*Str++) { 11378 case 'a': { 11379 Type = Context.SveCountTy; 11380 break; 11381 } 11382 default: 11383 llvm_unreachable("Unexpected target builtin type"); 11384 } 11385 break; 11386 } 11387 case 'V': { 11388 char *End; 11389 unsigned NumElements = strtoul(Str, &End, 10); 11390 assert(End != Str && "Missing vector size"); 11391 Str = End; 11392 11393 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11394 RequiresICE, false); 11395 assert(!RequiresICE && "Can't require vector ICE"); 11396 11397 // TODO: No way to make AltiVec vectors in builtins yet. 
11398 Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic); 11399 break; 11400 } 11401 case 'E': { 11402 char *End; 11403 11404 unsigned NumElements = strtoul(Str, &End, 10); 11405 assert(End != Str && "Missing vector size"); 11406 11407 Str = End; 11408 11409 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11410 false); 11411 Type = Context.getExtVectorType(ElementType, NumElements); 11412 break; 11413 } 11414 case 'X': { 11415 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11416 false); 11417 assert(!RequiresICE && "Can't require complex ICE"); 11418 Type = Context.getComplexType(ElementType); 11419 break; 11420 } 11421 case 'Y': 11422 Type = Context.getPointerDiffType(); 11423 break; 11424 case 'P': 11425 Type = Context.getFILEType(); 11426 if (Type.isNull()) { 11427 Error = ASTContext::GE_Missing_stdio; 11428 return {}; 11429 } 11430 break; 11431 case 'J': 11432 if (Signed) 11433 Type = Context.getsigjmp_bufType(); 11434 else 11435 Type = Context.getjmp_bufType(); 11436 11437 if (Type.isNull()) { 11438 Error = ASTContext::GE_Missing_setjmp; 11439 return {}; 11440 } 11441 break; 11442 case 'K': 11443 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11444 Type = Context.getucontext_tType(); 11445 11446 if (Type.isNull()) { 11447 Error = ASTContext::GE_Missing_ucontext; 11448 return {}; 11449 } 11450 break; 11451 case 'p': 11452 Type = Context.getProcessIDType(); 11453 break; 11454 } 11455 11456 // If there are modifiers and if we're allowed to parse them, go for it. 11457 Done = !AllowTypeModifiers; 11458 while (!Done) { 11459 switch (char c = *Str++) { 11460 default: Done = true; --Str; break; 11461 case '*': 11462 case '&': { 11463 // Both pointers and references can have their pointee types 11464 // qualified with an address space. 11465 char *End; 11466 unsigned AddrSpace = strtoul(Str, &End, 10); 11467 if (End != Str) { 11468 // Note AddrSpace == 0 is not the same as an unspecified address space. 11469 Type = Context.getAddrSpaceQualType( 11470 Type, 11471 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11472 Str = End; 11473 } 11474 if (c == '*') 11475 Type = Context.getPointerType(Type); 11476 else 11477 Type = Context.getLValueReferenceType(Type); 11478 break; 11479 } 11480 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11481 case 'C': 11482 Type = Type.withConst(); 11483 break; 11484 case 'D': 11485 Type = Context.getVolatileType(Type); 11486 break; 11487 case 'R': 11488 Type = Type.withRestrict(); 11489 break; 11490 } 11491 } 11492 11493 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11494 "Integer constant 'I' type must be an integer"); 11495 11496 return Type; 11497 } 11498 11499 // On some targets such as PowerPC, some of the builtins are defined with custom 11500 // type descriptors for target-dependent types. These descriptors are decoded in 11501 // other functions, but it may be useful to be able to fall back to default 11502 // descriptor decoding to define builtins mixing target-dependent and target- 11503 // independent types. This function allows decoding one type descriptor with 11504 // default decoding. 
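// As a concrete illustration of the descriptor grammar handled by
// DecodeTypeFromStr above: "dd" decodes to 'double (double)', and "icC*."
// decodes to 'int (const char *, ...)'; 'i' and 'c' are base letters, the
// trailing 'C' and '*' modifiers apply const and form a pointer, and the
// final '.' (consumed in GetBuiltinType below) marks the signature variadic.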
11505 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11506 GetBuiltinTypeError &Error, bool &RequireICE, 11507 bool AllowTypeModifiers) const { 11508 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11509 } 11510 11511 /// GetBuiltinType - Return the type for the specified builtin. 11512 QualType ASTContext::GetBuiltinType(unsigned Id, 11513 GetBuiltinTypeError &Error, 11514 unsigned *IntegerConstantArgs) const { 11515 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11516 if (TypeStr[0] == '\0') { 11517 Error = GE_Missing_type; 11518 return {}; 11519 } 11520 11521 SmallVector<QualType, 8> ArgTypes; 11522 11523 bool RequiresICE = false; 11524 Error = GE_None; 11525 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11526 RequiresICE, true); 11527 if (Error != GE_None) 11528 return {}; 11529 11530 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11531 11532 while (TypeStr[0] && TypeStr[0] != '.') { 11533 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11534 if (Error != GE_None) 11535 return {}; 11536 11537 // If this argument is required to be an IntegerConstantExpression and the 11538 // caller cares, fill in the bitmask we return. 11539 if (RequiresICE && IntegerConstantArgs) 11540 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11541 11542 // Do array -> pointer decay. The builtin should use the decayed type. 11543 if (Ty->isArrayType()) 11544 Ty = getArrayDecayedType(Ty); 11545 11546 ArgTypes.push_back(Ty); 11547 } 11548 11549 if (Id == Builtin::BI__GetExceptionInfo) 11550 return {}; 11551 11552 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11553 "'.' should only occur at end of builtin type list!"); 11554 11555 bool Variadic = (TypeStr[0] == '.'); 11556 11557 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11558 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11559 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11560 11561 11562 // We really shouldn't be making a no-proto type here. 11563 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11564 return getFunctionNoProtoType(ResType, EI); 11565 11566 FunctionProtoType::ExtProtoInfo EPI; 11567 EPI.ExtInfo = EI; 11568 EPI.Variadic = Variadic; 11569 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11570 EPI.ExceptionSpec.Type = 11571 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11572 11573 return getFunctionType(ResType, ArgTypes, EPI); 11574 } 11575 11576 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11577 const FunctionDecl *FD) { 11578 if (!FD->isExternallyVisible()) 11579 return GVA_Internal; 11580 11581 // Non-user-provided functions get emitted as weak definitions with every 11582 // use, no matter whether they've been explicitly instantiated etc. 
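// For instance, an implicitly-defined special member such as a defaulted copy
// constructor is not user-provided: every translation unit that uses it emits
// its own discardable (typically linkonce_odr) definition.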
11583 if (!FD->isUserProvided()) 11584 return GVA_DiscardableODR; 11585 11586 GVALinkage External; 11587 switch (FD->getTemplateSpecializationKind()) { 11588 case TSK_Undeclared: 11589 case TSK_ExplicitSpecialization: 11590 External = GVA_StrongExternal; 11591 break; 11592 11593 case TSK_ExplicitInstantiationDefinition: 11594 return GVA_StrongODR; 11595 11596 // C++11 [temp.explicit]p10: 11597 // [ Note: The intent is that an inline function that is the subject of 11598 // an explicit instantiation declaration will still be implicitly 11599 // instantiated when used so that the body can be considered for 11600 // inlining, but that no out-of-line copy of the inline function would be 11601 // generated in the translation unit. -- end note ] 11602 case TSK_ExplicitInstantiationDeclaration: 11603 return GVA_AvailableExternally; 11604 11605 case TSK_ImplicitInstantiation: 11606 External = GVA_DiscardableODR; 11607 break; 11608 } 11609 11610 if (!FD->isInlined()) 11611 return External; 11612 11613 if ((!Context.getLangOpts().CPlusPlus && 11614 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11615 !FD->hasAttr<DLLExportAttr>()) || 11616 FD->hasAttr<GNUInlineAttr>()) { 11617 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11618 11619 // GNU or C99 inline semantics. Determine whether this symbol should be 11620 // externally visible. 11621 if (FD->isInlineDefinitionExternallyVisible()) 11622 return External; 11623 11624 // C99 inline semantics, where the symbol is not externally visible. 11625 return GVA_AvailableExternally; 11626 } 11627 11628 // Functions specified with extern and inline in -fms-compatibility mode 11629 // forcibly get emitted. While the body of the function cannot be later 11630 // replaced, the function definition cannot be discarded. 11631 if (FD->isMSExternInline()) 11632 return GVA_StrongODR; 11633 11634 if (Context.getTargetInfo().getCXXABI().isMicrosoft() && 11635 isa<CXXConstructorDecl>(FD) && 11636 cast<CXXConstructorDecl>(FD)->isInheritingConstructor()) 11637 // Our approach to inheriting constructors is fundamentally different from 11638 // that used by the MS ABI, so keep our inheriting constructor thunks 11639 // internal rather than trying to pick an unambiguous mangling for them. 11640 return GVA_Internal; 11641 11642 return GVA_DiscardableODR; 11643 } 11644 11645 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11646 const Decl *D, GVALinkage L) { 11647 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11648 // dllexport/dllimport on inline functions. 11649 if (D->hasAttr<DLLImportAttr>()) { 11650 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11651 return GVA_AvailableExternally; 11652 } else if (D->hasAttr<DLLExportAttr>()) { 11653 if (L == GVA_DiscardableODR) 11654 return GVA_StrongODR; 11655 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11656 // Device-side functions with __global__ attribute must always be 11657 // visible externally so they can be launched from host. 11658 if (D->hasAttr<CUDAGlobalAttr>() && 11659 (L == GVA_DiscardableODR || L == GVA_Internal)) 11660 return GVA_StrongODR; 11661 // Single source offloading languages like CUDA/HIP need to be able to 11662 // access static device variables from host code of the same compilation 11663 // unit. 
This is done by externalizing the static variable with a shared 11664 // name between the host and device compilation which is the same for the 11665 // same compilation unit whereas different among different compilation 11666 // units. 11667 if (Context.shouldExternalize(D)) 11668 return GVA_StrongExternal; 11669 } 11670 return L; 11671 } 11672 11673 /// Adjust the GVALinkage for a declaration based on what an external AST source 11674 /// knows about whether there can be other definitions of this declaration. 11675 static GVALinkage 11676 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11677 GVALinkage L) { 11678 ExternalASTSource *Source = Ctx.getExternalSource(); 11679 if (!Source) 11680 return L; 11681 11682 switch (Source->hasExternalDefinitions(D)) { 11683 case ExternalASTSource::EK_Never: 11684 // Other translation units rely on us to provide the definition. 11685 if (L == GVA_DiscardableODR) 11686 return GVA_StrongODR; 11687 break; 11688 11689 case ExternalASTSource::EK_Always: 11690 return GVA_AvailableExternally; 11691 11692 case ExternalASTSource::EK_ReplyHazy: 11693 break; 11694 } 11695 return L; 11696 } 11697 11698 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11699 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11700 adjustGVALinkageForAttributes(*this, FD, 11701 basicGVALinkageForFunction(*this, FD))); 11702 } 11703 11704 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11705 const VarDecl *VD) { 11706 // As an extension for interactive REPLs, make sure constant variables are 11707 // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl 11708 // marking them as internal. 11709 if (Context.getLangOpts().CPlusPlus && 11710 Context.getLangOpts().IncrementalExtensions && 11711 VD->getType().isConstQualified() && 11712 !VD->getType().isVolatileQualified() && !VD->isInline() && 11713 !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate()) 11714 return GVA_DiscardableODR; 11715 11716 if (!VD->isExternallyVisible()) 11717 return GVA_Internal; 11718 11719 if (VD->isStaticLocal()) { 11720 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11721 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11722 LexicalContext = LexicalContext->getLexicalParent(); 11723 11724 // ObjC Blocks can create local variables that don't have a FunctionDecl 11725 // LexicalContext. 11726 if (!LexicalContext) 11727 return GVA_DiscardableODR; 11728 11729 // Otherwise, let the static local variable inherit its linkage from the 11730 // nearest enclosing function. 11731 auto StaticLocalLinkage = 11732 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11733 11734 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11735 // be emitted in any object with references to the symbol for the object it 11736 // contains, whether inline or out-of-line." 11737 // Similar behavior is observed with MSVC. An alternative ABI could use 11738 // StrongODR/AvailableExternally to match the function, but none are 11739 // known/supported currently. 11740 if (StaticLocalLinkage == GVA_StrongODR || 11741 StaticLocalLinkage == GVA_AvailableExternally) 11742 return GVA_DiscardableODR; 11743 return StaticLocalLinkage; 11744 } 11745 11746 // MSVC treats in-class initialized static data members as definitions. 11747 // By giving them non-strong linkage, out-of-line definitions won't 11748 // cause link errors. 
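// A typical case is 'struct S { static const int N = 3; };' with the
// initializer in the class body: for MSVC compatibility the member is treated
// as defined, and the non-strong linkage keeps a later out-of-line
// 'const int S::N;' from turning into a duplicate-definition link error.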
11749 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11750 return GVA_DiscardableODR; 11751 11752 // Most non-template variables have strong linkage; inline variables are 11753 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11754 GVALinkage StrongLinkage; 11755 switch (Context.getInlineVariableDefinitionKind(VD)) { 11756 case ASTContext::InlineVariableDefinitionKind::None: 11757 StrongLinkage = GVA_StrongExternal; 11758 break; 11759 case ASTContext::InlineVariableDefinitionKind::Weak: 11760 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11761 StrongLinkage = GVA_DiscardableODR; 11762 break; 11763 case ASTContext::InlineVariableDefinitionKind::Strong: 11764 StrongLinkage = GVA_StrongODR; 11765 break; 11766 } 11767 11768 switch (VD->getTemplateSpecializationKind()) { 11769 case TSK_Undeclared: 11770 return StrongLinkage; 11771 11772 case TSK_ExplicitSpecialization: 11773 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11774 VD->isStaticDataMember() 11775 ? GVA_StrongODR 11776 : StrongLinkage; 11777 11778 case TSK_ExplicitInstantiationDefinition: 11779 return GVA_StrongODR; 11780 11781 case TSK_ExplicitInstantiationDeclaration: 11782 return GVA_AvailableExternally; 11783 11784 case TSK_ImplicitInstantiation: 11785 return GVA_DiscardableODR; 11786 } 11787 11788 llvm_unreachable("Invalid Linkage!"); 11789 } 11790 11791 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const { 11792 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11793 adjustGVALinkageForAttributes(*this, VD, 11794 basicGVALinkageForVariable(*this, VD))); 11795 } 11796 11797 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11798 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11799 if (!VD->isFileVarDecl()) 11800 return false; 11801 // Global named register variables (GNU extension) are never emitted. 11802 if (VD->getStorageClass() == SC_Register) 11803 return false; 11804 if (VD->getDescribedVarTemplate() || 11805 isa<VarTemplatePartialSpecializationDecl>(VD)) 11806 return false; 11807 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11808 // We never need to emit an uninstantiated function template. 11809 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11810 return false; 11811 } else if (isa<PragmaCommentDecl>(D)) 11812 return true; 11813 else if (isa<PragmaDetectMismatchDecl>(D)) 11814 return true; 11815 else if (isa<OMPRequiresDecl>(D)) 11816 return true; 11817 else if (isa<OMPThreadPrivateDecl>(D)) 11818 return !D->getDeclContext()->isDependentContext(); 11819 else if (isa<OMPAllocateDecl>(D)) 11820 return !D->getDeclContext()->isDependentContext(); 11821 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11822 return !D->getDeclContext()->isDependentContext(); 11823 else if (isa<ImportDecl>(D)) 11824 return true; 11825 else 11826 return false; 11827 11828 // If this is a member of a class template, we do not need to emit it. 11829 if (D->getDeclContext()->isDependentContext()) 11830 return false; 11831 11832 // Weak references don't produce any output by themselves. 11833 if (D->hasAttr<WeakRefAttr>()) 11834 return false; 11835 11836 // Aliases and used decls are required. 11837 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11838 return true; 11839 11840 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11841 // Forward declarations aren't required. 
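// A bare prototype like 'int f(void);' forces nothing by itself; the call
// below covers the exception where a body-less declaration still forces an
// externally visible definition, e.g. (roughly) a C99 redeclaration without
// 'inline' of a function already defined 'inline' in this translation unit.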
11842 if (!FD->doesThisDeclarationHaveABody()) 11843 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11844 11845 // Constructors and destructors are required. 11846 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11847 return true; 11848 11849 // The key function for a class is required. This rule only comes 11850 // into play when inline functions can be key functions, though. 11851 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11852 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11853 const CXXRecordDecl *RD = MD->getParent(); 11854 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11855 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11856 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11857 return true; 11858 } 11859 } 11860 } 11861 11862 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11863 11864 // static, static inline, always_inline, and extern inline functions can 11865 // always be deferred. Normal inline functions can be deferred in C99/C++. 11866 // Implicit template instantiations can also be deferred in C++. 11867 return !isDiscardableGVALinkage(Linkage); 11868 } 11869 11870 const auto *VD = cast<VarDecl>(D); 11871 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11872 11873 // If the decl is marked as `declare target to`, it should be emitted for the 11874 // host and for the device. 11875 if (LangOpts.OpenMP && 11876 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11877 return true; 11878 11879 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11880 !isMSStaticDataMemberInlineDefinition(VD)) 11881 return false; 11882 11883 // Variables in other module units shouldn't be forced to be emitted. 11884 if (VD->isInAnotherModuleUnit()) 11885 return false; 11886 11887 // Variables that can be needed in other TUs are required. 11888 auto Linkage = GetGVALinkageForVariable(VD); 11889 if (!isDiscardableGVALinkage(Linkage)) 11890 return true; 11891 11892 // We never need to emit a variable that is available in another TU. 11893 if (Linkage == GVA_AvailableExternally) 11894 return false; 11895 11896 // Variables that have destruction with side-effects are required. 11897 if (VD->needsDestruction(*this)) 11898 return true; 11899 11900 // Variables that have initialization with side-effects are required. 11901 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11902 // We can get a value-dependent initializer during error recovery. 11903 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11904 return true; 11905 11906 // Likewise, variables with tuple-like bindings are required if their 11907 // bindings have side-effects. 11908 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11909 for (const auto *BD : DD->bindings()) 11910 if (const auto *BindingVD = BD->getHoldingVar()) 11911 if (DeclMustBeEmitted(BindingVD)) 11912 return true; 11913 11914 return false; 11915 } 11916 11917 void ASTContext::forEachMultiversionedFunctionVersion( 11918 const FunctionDecl *FD, 11919 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11920 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11921 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11922 FD = FD->getMostRecentDecl(); 11923 // FIXME: The order of traversal here matters and depends on the order of 11924 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11925 // shouldn't rely on that. 
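// For reference, the versions walked here come from function multiversioning,
// e.g. a pair of declarations such as
//   int f(void) __attribute__((target("default")));
//   int f(void) __attribute__((target("avx2")));
// where each redeclaration with a distinct target string is its own version.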
11926 for (auto *CurDecl : 11927 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11928 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11929 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11930 !SeenDecls.contains(CurFD)) { 11931 SeenDecls.insert(CurFD); 11932 Pred(CurFD); 11933 } 11934 } 11935 } 11936 11937 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11938 bool IsCXXMethod, 11939 bool IsBuiltin) const { 11940 // Pass through to the C++ ABI object 11941 if (IsCXXMethod) 11942 return ABI->getDefaultMethodCallConv(IsVariadic); 11943 11944 // Builtins ignore user-specified default calling convention and remain the 11945 // Target's default calling convention. 11946 if (!IsBuiltin) { 11947 switch (LangOpts.getDefaultCallingConv()) { 11948 case LangOptions::DCC_None: 11949 break; 11950 case LangOptions::DCC_CDecl: 11951 return CC_C; 11952 case LangOptions::DCC_FastCall: 11953 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11954 return CC_X86FastCall; 11955 break; 11956 case LangOptions::DCC_StdCall: 11957 if (!IsVariadic) 11958 return CC_X86StdCall; 11959 break; 11960 case LangOptions::DCC_VectorCall: 11961 // __vectorcall cannot be applied to variadic functions. 11962 if (!IsVariadic) 11963 return CC_X86VectorCall; 11964 break; 11965 case LangOptions::DCC_RegCall: 11966 // __regcall cannot be applied to variadic functions. 11967 if (!IsVariadic) 11968 return CC_X86RegCall; 11969 break; 11970 case LangOptions::DCC_RtdCall: 11971 if (!IsVariadic) 11972 return CC_M68kRTD; 11973 break; 11974 } 11975 } 11976 return Target->getDefaultCallingConv(); 11977 } 11978 11979 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 11980 // Pass through to the C++ ABI object 11981 return ABI->isNearlyEmpty(RD); 11982 } 11983 11984 VTableContextBase *ASTContext::getVTableContext() { 11985 if (!VTContext.get()) { 11986 auto ABI = Target->getCXXABI(); 11987 if (ABI.isMicrosoft()) 11988 VTContext.reset(new MicrosoftVTableContext(*this)); 11989 else { 11990 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 11991 ? 
ItaniumVTableContext::Relative 11992 : ItaniumVTableContext::Pointer; 11993 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 11994 } 11995 } 11996 return VTContext.get(); 11997 } 11998 11999 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 12000 if (!T) 12001 T = Target; 12002 switch (T->getCXXABI().getKind()) { 12003 case TargetCXXABI::AppleARM64: 12004 case TargetCXXABI::Fuchsia: 12005 case TargetCXXABI::GenericAArch64: 12006 case TargetCXXABI::GenericItanium: 12007 case TargetCXXABI::GenericARM: 12008 case TargetCXXABI::GenericMIPS: 12009 case TargetCXXABI::iOS: 12010 case TargetCXXABI::WebAssembly: 12011 case TargetCXXABI::WatchOS: 12012 case TargetCXXABI::XL: 12013 return ItaniumMangleContext::create(*this, getDiagnostics()); 12014 case TargetCXXABI::Microsoft: 12015 return MicrosoftMangleContext::create(*this, getDiagnostics()); 12016 } 12017 llvm_unreachable("Unsupported ABI"); 12018 } 12019 12020 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 12021 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 12022 "Device mangle context does not support Microsoft mangling."); 12023 switch (T.getCXXABI().getKind()) { 12024 case TargetCXXABI::AppleARM64: 12025 case TargetCXXABI::Fuchsia: 12026 case TargetCXXABI::GenericAArch64: 12027 case TargetCXXABI::GenericItanium: 12028 case TargetCXXABI::GenericARM: 12029 case TargetCXXABI::GenericMIPS: 12030 case TargetCXXABI::iOS: 12031 case TargetCXXABI::WebAssembly: 12032 case TargetCXXABI::WatchOS: 12033 case TargetCXXABI::XL: 12034 return ItaniumMangleContext::create( 12035 *this, getDiagnostics(), 12036 [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> { 12037 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 12038 return RD->getDeviceLambdaManglingNumber(); 12039 return std::nullopt; 12040 }, 12041 /*IsAux=*/true); 12042 case TargetCXXABI::Microsoft: 12043 return MicrosoftMangleContext::create(*this, getDiagnostics(), 12044 /*IsAux=*/true); 12045 } 12046 llvm_unreachable("Unsupported ABI"); 12047 } 12048 12049 CXXABI::~CXXABI() = default; 12050 12051 size_t ASTContext::getSideTableAllocatedMemory() const { 12052 return ASTRecordLayouts.getMemorySize() + 12053 llvm::capacity_in_bytes(ObjCLayouts) + 12054 llvm::capacity_in_bytes(KeyFunctions) + 12055 llvm::capacity_in_bytes(ObjCImpls) + 12056 llvm::capacity_in_bytes(BlockVarCopyInits) + 12057 llvm::capacity_in_bytes(DeclAttrs) + 12058 llvm::capacity_in_bytes(TemplateOrInstantiation) + 12059 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 12060 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 12061 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 12062 llvm::capacity_in_bytes(OverriddenMethods) + 12063 llvm::capacity_in_bytes(Types) + 12064 llvm::capacity_in_bytes(VariableArrayTypes); 12065 } 12066 12067 /// getIntTypeForBitwidth - 12068 /// sets integer QualTy according to specified details: 12069 /// bitwidth, signed/unsigned. 12070 /// Returns empty type if there is no appropriate target types. 12071 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, 12072 unsigned Signed) const { 12073 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); 12074 CanQualType QualTy = getFromTargetType(Ty); 12075 if (!QualTy && DestWidth == 128) 12076 return Signed ? Int128Ty : UnsignedInt128Ty; 12077 return QualTy; 12078 } 12079 12080 /// getRealTypeForBitwidth - 12081 /// sets floating point QualTy according to specified bitwidth. 
12082 /// Returns empty type if there is no appropriate target types. 12083 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, 12084 FloatModeKind ExplicitType) const { 12085 FloatModeKind Ty = 12086 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType); 12087 switch (Ty) { 12088 case FloatModeKind::Half: 12089 return HalfTy; 12090 case FloatModeKind::Float: 12091 return FloatTy; 12092 case FloatModeKind::Double: 12093 return DoubleTy; 12094 case FloatModeKind::LongDouble: 12095 return LongDoubleTy; 12096 case FloatModeKind::Float128: 12097 return Float128Ty; 12098 case FloatModeKind::Ibm128: 12099 return Ibm128Ty; 12100 case FloatModeKind::NoFloat: 12101 return {}; 12102 } 12103 12104 llvm_unreachable("Unhandled TargetInfo::RealType value"); 12105 } 12106 12107 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { 12108 if (Number > 1) 12109 MangleNumbers[ND] = Number; 12110 } 12111 12112 unsigned ASTContext::getManglingNumber(const NamedDecl *ND, 12113 bool ForAuxTarget) const { 12114 auto I = MangleNumbers.find(ND); 12115 unsigned Res = I != MangleNumbers.end() ? I->second : 1; 12116 // CUDA/HIP host compilation encodes host and device mangling numbers 12117 // as lower and upper half of 32 bit integer. 12118 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) { 12119 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF; 12120 } else { 12121 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling " 12122 "number for aux target"); 12123 } 12124 return Res > 1 ? Res : 1; 12125 } 12126 12127 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { 12128 if (Number > 1) 12129 StaticLocalNumbers[VD] = Number; 12130 } 12131 12132 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { 12133 auto I = StaticLocalNumbers.find(VD); 12134 return I != StaticLocalNumbers.end() ? I->second : 1; 12135 } 12136 12137 MangleNumberingContext & 12138 ASTContext::getManglingNumberContext(const DeclContext *DC) { 12139 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 12140 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC]; 12141 if (!MCtx) 12142 MCtx = createMangleNumberingContext(); 12143 return *MCtx; 12144 } 12145 12146 MangleNumberingContext & 12147 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) { 12148 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 
12149 std::unique_ptr<MangleNumberingContext> &MCtx = 12150 ExtraMangleNumberingContexts[D]; 12151 if (!MCtx) 12152 MCtx = createMangleNumberingContext(); 12153 return *MCtx; 12154 } 12155 12156 std::unique_ptr<MangleNumberingContext> 12157 ASTContext::createMangleNumberingContext() const { 12158 return ABI->createMangleNumberingContext(); 12159 } 12160 12161 const CXXConstructorDecl * 12162 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 12163 return ABI->getCopyConstructorForExceptionObject( 12164 cast<CXXRecordDecl>(RD->getFirstDecl())); 12165 } 12166 12167 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 12168 CXXConstructorDecl *CD) { 12169 return ABI->addCopyConstructorForExceptionObject( 12170 cast<CXXRecordDecl>(RD->getFirstDecl()), 12171 cast<CXXConstructorDecl>(CD->getFirstDecl())); 12172 } 12173 12174 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 12175 TypedefNameDecl *DD) { 12176 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 12177 } 12178 12179 TypedefNameDecl * 12180 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 12181 return ABI->getTypedefNameForUnnamedTagDecl(TD); 12182 } 12183 12184 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 12185 DeclaratorDecl *DD) { 12186 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 12187 } 12188 12189 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 12190 return ABI->getDeclaratorForUnnamedTagDecl(TD); 12191 } 12192 12193 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 12194 ParamIndices[D] = index; 12195 } 12196 12197 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 12198 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 12199 assert(I != ParamIndices.end() && 12200 "ParmIndices lacks entry set by ParmVarDecl"); 12201 return I->second; 12202 } 12203 12204 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 12205 unsigned Length) const { 12206 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 12207 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 12208 EltTy = EltTy.withConst(); 12209 12210 EltTy = adjustStringLiteralBaseType(EltTy); 12211 12212 // Get an array type for the string, according to C99 6.4.5. This includes 12213 // the null terminator character. 
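// For example, with Length == 3 (say the literal "abc") this yields
// 'char[4]', with a const-qualified element type in C++ or under
// -fconst-strings, per the adjustments above.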
12214 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, 12215 ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0); 12216 } 12217 12218 StringLiteral * 12219 ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { 12220 StringLiteral *&Result = StringLiteralCache[Key]; 12221 if (!Result) 12222 Result = StringLiteral::Create( 12223 *this, Key, StringLiteralKind::Ordinary, 12224 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), 12225 SourceLocation()); 12226 return Result; 12227 } 12228 12229 MSGuidDecl * 12230 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { 12231 assert(MSGuidTagDecl && "building MS GUID without MS extensions?"); 12232 12233 llvm::FoldingSetNodeID ID; 12234 MSGuidDecl::Profile(ID, Parts); 12235 12236 void *InsertPos; 12237 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos)) 12238 return Existing; 12239 12240 QualType GUIDType = getMSGuidType().withConst(); 12241 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts); 12242 MSGuidDecls.InsertNode(New, InsertPos); 12243 return New; 12244 } 12245 12246 UnnamedGlobalConstantDecl * 12247 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty, 12248 const APValue &APVal) const { 12249 llvm::FoldingSetNodeID ID; 12250 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal); 12251 12252 void *InsertPos; 12253 if (UnnamedGlobalConstantDecl *Existing = 12254 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos)) 12255 return Existing; 12256 12257 UnnamedGlobalConstantDecl *New = 12258 UnnamedGlobalConstantDecl::Create(*this, Ty, APVal); 12259 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos); 12260 return New; 12261 } 12262 12263 TemplateParamObjectDecl * 12264 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { 12265 assert(T->isRecordType() && "template param object of unexpected type"); 12266 12267 // C++ [temp.param]p8: 12268 // [...] a static storage duration object of type 'const T' [...] 12269 T.addConst(); 12270 12271 llvm::FoldingSetNodeID ID; 12272 TemplateParamObjectDecl::Profile(ID, T, V); 12273 12274 void *InsertPos; 12275 if (TemplateParamObjectDecl *Existing = 12276 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos)) 12277 return Existing; 12278 12279 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V); 12280 TemplateParamObjectDecls.InsertNode(New, InsertPos); 12281 return New; 12282 } 12283 12284 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { 12285 const llvm::Triple &T = getTargetInfo().getTriple(); 12286 if (!T.isOSDarwin()) 12287 return false; 12288 12289 if (!(T.isiOS() && T.isOSVersionLT(7)) && 12290 !(T.isMacOSX() && T.isOSVersionLT(10, 9))) 12291 return false; 12292 12293 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 12294 CharUnits sizeChars = getTypeSizeInChars(AtomicTy); 12295 uint64_t Size = sizeChars.getQuantity(); 12296 CharUnits alignChars = getTypeAlignInChars(AtomicTy); 12297 unsigned Align = alignChars.getQuantity(); 12298 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); 12299 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); 12300 } 12301 12302 bool 12303 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, 12304 const ObjCMethodDecl *MethodImpl) { 12305 // No point trying to match an unavailable/deprecated method.
12306 if (MethodDecl->hasAttr<UnavailableAttr>() 12307 || MethodDecl->hasAttr<DeprecatedAttr>()) 12308 return false; 12309 if (MethodDecl->getObjCDeclQualifier() != 12310 MethodImpl->getObjCDeclQualifier()) 12311 return false; 12312 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12313 return false; 12314 12315 if (MethodDecl->param_size() != MethodImpl->param_size()) 12316 return false; 12317 12318 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12319 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12320 EF = MethodDecl->param_end(); 12321 IM != EM && IF != EF; ++IM, ++IF) { 12322 const ParmVarDecl *DeclVar = (*IF); 12323 const ParmVarDecl *ImplVar = (*IM); 12324 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12325 return false; 12326 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12327 return false; 12328 } 12329 12330 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12331 } 12332 12333 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12334 LangAS AS; 12335 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12336 AS = LangAS::Default; 12337 else 12338 AS = QT->getPointeeType().getAddressSpace(); 12339 12340 return getTargetInfo().getNullPointerValue(AS); 12341 } 12342 12343 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12344 return getTargetInfo().getTargetAddressSpace(AS); 12345 } 12346 12347 bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { 12348 if (X == Y) 12349 return true; 12350 if (!X || !Y) 12351 return false; 12352 llvm::FoldingSetNodeID IDX, IDY; 12353 X->Profile(IDX, *this, /*Canonical=*/true); 12354 Y->Profile(IDY, *this, /*Canonical=*/true); 12355 return IDX == IDY; 12356 } 12357 12358 // The getCommon* helpers return, for given 'same' X and Y entities given as 12359 // inputs, another entity which is also the 'same' as the inputs, but which 12360 // is closer to the canonical form of the inputs, each according to a given 12361 // criteria. 12362 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of 12363 // the regular ones. 12364 12365 static Decl *getCommonDecl(Decl *X, Decl *Y) { 12366 if (!declaresSameEntity(X, Y)) 12367 return nullptr; 12368 for (const Decl *DX : X->redecls()) { 12369 // If we reach Y before reaching the first decl, that means X is older. 12370 if (DX == Y) 12371 return X; 12372 // If we reach the first decl, then Y is older. 12373 if (DX->isFirstDecl()) 12374 return Y; 12375 } 12376 llvm_unreachable("Corrupt redecls chain"); 12377 } 12378 12379 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12380 static T *getCommonDecl(T *X, T *Y) { 12381 return cast_or_null<T>( 12382 getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), 12383 const_cast<Decl *>(cast_or_null<Decl>(Y)))); 12384 } 12385 12386 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12387 static T *getCommonDeclChecked(T *X, T *Y) { 12388 return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), 12389 const_cast<Decl *>(cast<Decl>(Y)))); 12390 } 12391 12392 static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, 12393 TemplateName Y) { 12394 if (X.getAsVoidPointer() == Y.getAsVoidPointer()) 12395 return X; 12396 // FIXME: There are cases here where we could find a common template name 12397 // with more sugar. For example one could be a SubstTemplateTemplate* 12398 // replacing the other. 
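  // Until then, fall back to the shared canonical template name computed
  // below, or report failure with a null TemplateName if even those differ.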
12399 TemplateName CX = Ctx.getCanonicalTemplateName(X); 12400 if (CX.getAsVoidPointer() != 12401 Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) 12402 return TemplateName(); 12403 return CX; 12404 } 12405 12406 static TemplateName 12407 getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { 12408 TemplateName R = getCommonTemplateName(Ctx, X, Y); 12409 assert(R.getAsVoidPointer() != nullptr); 12410 return R; 12411 } 12412 12413 static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, 12414 ArrayRef<QualType> Ys, bool Unqualified = false) { 12415 assert(Xs.size() == Ys.size()); 12416 SmallVector<QualType, 8> Rs(Xs.size()); 12417 for (size_t I = 0; I < Rs.size(); ++I) 12418 Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); 12419 return Rs; 12420 } 12421 12422 template <class T> 12423 static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { 12424 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() 12425 : SourceLocation(); 12426 } 12427 12428 static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, 12429 const TemplateArgument &X, 12430 const TemplateArgument &Y) { 12431 if (X.getKind() != Y.getKind()) 12432 return TemplateArgument(); 12433 12434 switch (X.getKind()) { 12435 case TemplateArgument::ArgKind::Type: 12436 if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) 12437 return TemplateArgument(); 12438 return TemplateArgument( 12439 Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); 12440 case TemplateArgument::ArgKind::NullPtr: 12441 if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) 12442 return TemplateArgument(); 12443 return TemplateArgument( 12444 Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), 12445 /*Unqualified=*/true); 12446 case TemplateArgument::ArgKind::Expression: 12447 if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) 12448 return TemplateArgument(); 12449 // FIXME: Try to keep the common sugar. 12450 return X; 12451 case TemplateArgument::ArgKind::Template: { 12452 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); 12453 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12454 if (!CTN.getAsVoidPointer()) 12455 return TemplateArgument(); 12456 return TemplateArgument(CTN); 12457 } 12458 case TemplateArgument::ArgKind::TemplateExpansion: { 12459 TemplateName TX = X.getAsTemplateOrTemplatePattern(), 12460 TY = Y.getAsTemplateOrTemplatePattern(); 12461 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12462 if (!CTN.getAsVoidPointer()) 12463 return TemplateName(); 12464 auto NExpX = X.getNumTemplateExpansions(); 12465 assert(NExpX == Y.getNumTemplateExpansions()); 12466 return TemplateArgument(CTN, NExpX); 12467 } 12468 default: 12469 // FIXME: Handle the other argument kinds. 
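    // (Illustrative note: the remaining kinds, e.g. Integral and Pack
    // arguments, are currently returned unchanged as X.)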
12470 return X; 12471 } 12472 } 12473 12474 static bool getCommonTemplateArguments(ASTContext &Ctx, 12475 SmallVectorImpl<TemplateArgument> &R, 12476 ArrayRef<TemplateArgument> Xs, 12477 ArrayRef<TemplateArgument> Ys) { 12478 if (Xs.size() != Ys.size()) 12479 return true; 12480 R.resize(Xs.size()); 12481 for (size_t I = 0; I < R.size(); ++I) { 12482 R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); 12483 if (R[I].isNull()) 12484 return true; 12485 } 12486 return false; 12487 } 12488 12489 static auto getCommonTemplateArguments(ASTContext &Ctx, 12490 ArrayRef<TemplateArgument> Xs, 12491 ArrayRef<TemplateArgument> Ys) { 12492 SmallVector<TemplateArgument, 8> R; 12493 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); 12494 assert(!Different); 12495 (void)Different; 12496 return R; 12497 } 12498 12499 template <class T> 12500 static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { 12501 return X->getKeyword() == Y->getKeyword() ? X->getKeyword() 12502 : ElaboratedTypeKeyword::None; 12503 } 12504 12505 template <class T> 12506 static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, 12507 const T *Y) { 12508 // FIXME: Try to keep the common NNS sugar. 12509 return X->getQualifier() == Y->getQualifier() 12510 ? X->getQualifier() 12511 : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); 12512 } 12513 12514 template <class T> 12515 static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { 12516 return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); 12517 } 12518 12519 template <class T> 12520 static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, 12521 Qualifiers &QX, const T *Y, 12522 Qualifiers &QY) { 12523 QualType EX = X->getElementType(), EY = Y->getElementType(); 12524 QualType R = Ctx.getCommonSugaredType(EX, EY, 12525 /*Unqualified=*/true); 12526 Qualifiers RQ = R.getQualifiers(); 12527 QX += EX.getQualifiers() - RQ; 12528 QY += EY.getQualifiers() - RQ; 12529 return R; 12530 } 12531 12532 template <class T> 12533 static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { 12534 return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); 12535 } 12536 12537 template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { 12538 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); 12539 return X->getSizeExpr(); 12540 } 12541 12542 static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { 12543 assert(X->getSizeModifier() == Y->getSizeModifier()); 12544 return X->getSizeModifier(); 12545 } 12546 12547 static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, 12548 const ArrayType *Y) { 12549 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); 12550 return X->getIndexTypeCVRQualifiers(); 12551 } 12552 12553 // Merges two type lists such that the resulting vector will contain 12554 // each type (in a canonical sense) only once, in the order they appear 12555 // from X to Y. If they occur in both X and Y, the result will contain 12556 // the common sugared type between them. 
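// For example (illustrative): X = {A, B} and Y = {B, C} produce {A, B, C},
// where the B entry is the common sugared type of the two spellings of B.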
12557 static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, 12558 ArrayRef<QualType> X, ArrayRef<QualType> Y) { 12559 llvm::DenseMap<QualType, unsigned> Found; 12560 for (auto Ts : {X, Y}) { 12561 for (QualType T : Ts) { 12562 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); 12563 if (!Res.second) { 12564 QualType &U = Out[Res.first->second]; 12565 U = Ctx.getCommonSugaredType(U, T); 12566 } else { 12567 Out.emplace_back(T); 12568 } 12569 } 12570 } 12571 } 12572 12573 FunctionProtoType::ExceptionSpecInfo 12574 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, 12575 FunctionProtoType::ExceptionSpecInfo ESI2, 12576 SmallVectorImpl<QualType> &ExceptionTypeStorage, 12577 bool AcceptDependent) { 12578 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; 12579 12580 // If either of them can throw anything, that is the result. 12581 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { 12582 if (EST1 == I) 12583 return ESI1; 12584 if (EST2 == I) 12585 return ESI2; 12586 } 12587 12588 // If either of them is non-throwing, the result is the other. 12589 for (auto I : 12590 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { 12591 if (EST1 == I) 12592 return ESI2; 12593 if (EST2 == I) 12594 return ESI1; 12595 } 12596 12597 // If we're left with value-dependent computed noexcept expressions, we're 12598 // stuck. Before C++17, we can just drop the exception specification entirely, 12599 // since it's not actually part of the canonical type. And this should never 12600 // happen in C++17, because it would mean we were computing the composite 12601 // pointer type of dependent types, which should never happen. 12602 if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) { 12603 assert(AcceptDependent && 12604 "computing composite pointer type of dependent types"); 12605 return FunctionProtoType::ExceptionSpecInfo(); 12606 } 12607 12608 // Switch over the possibilities so that people adding new values know to 12609 // update this function. 12610 switch (EST1) { 12611 case EST_None: 12612 case EST_DynamicNone: 12613 case EST_MSAny: 12614 case EST_BasicNoexcept: 12615 case EST_DependentNoexcept: 12616 case EST_NoexceptFalse: 12617 case EST_NoexceptTrue: 12618 case EST_NoThrow: 12619 llvm_unreachable("These ESTs should be handled above"); 12620 12621 case EST_Dynamic: { 12622 // This is the fun case: both exception specifications are dynamic. Form 12623 // the union of the two lists. 
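    // For example (illustrative): merging 'throw(A, B)' with 'throw(B, C)'
    // yields a dynamic specification equivalent to 'throw(A, B, C)'.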
12624 assert(EST2 == EST_Dynamic && "other cases should already be handled"); 12625 mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions, 12626 ESI2.Exceptions); 12627 FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic); 12628 Result.Exceptions = ExceptionTypeStorage; 12629 return Result; 12630 } 12631 12632 case EST_Unevaluated: 12633 case EST_Uninstantiated: 12634 case EST_Unparsed: 12635 llvm_unreachable("shouldn't see unresolved exception specifications here"); 12636 } 12637 12638 llvm_unreachable("invalid ExceptionSpecificationType"); 12639 } 12640 12641 static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, 12642 Qualifiers &QX, const Type *Y, 12643 Qualifiers &QY) { 12644 Type::TypeClass TC = X->getTypeClass(); 12645 assert(TC == Y->getTypeClass()); 12646 switch (TC) { 12647 #define UNEXPECTED_TYPE(Class, Kind) \ 12648 case Type::Class: \ 12649 llvm_unreachable("Unexpected " Kind ": " #Class); 12650 12651 #define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical") 12652 #define TYPE(Class, Base) 12653 #include "clang/AST/TypeNodes.inc" 12654 12655 #define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free") 12656 SUGAR_FREE_TYPE(Builtin) 12657 SUGAR_FREE_TYPE(DeducedTemplateSpecialization) 12658 SUGAR_FREE_TYPE(DependentBitInt) 12659 SUGAR_FREE_TYPE(Enum) 12660 SUGAR_FREE_TYPE(BitInt) 12661 SUGAR_FREE_TYPE(ObjCInterface) 12662 SUGAR_FREE_TYPE(Record) 12663 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack) 12664 SUGAR_FREE_TYPE(UnresolvedUsing) 12665 #undef SUGAR_FREE_TYPE 12666 #define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique") 12667 NON_UNIQUE_TYPE(TypeOfExpr) 12668 NON_UNIQUE_TYPE(VariableArray) 12669 #undef NON_UNIQUE_TYPE 12670 12671 UNEXPECTED_TYPE(TypeOf, "sugar") 12672 12673 #undef UNEXPECTED_TYPE 12674 12675 case Type::Auto: { 12676 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 12677 assert(AX->getDeducedType().isNull()); 12678 assert(AY->getDeducedType().isNull()); 12679 assert(AX->getKeyword() == AY->getKeyword()); 12680 assert(AX->isInstantiationDependentType() == 12681 AY->isInstantiationDependentType()); 12682 auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(), 12683 AY->getTypeConstraintArguments()); 12684 return Ctx.getAutoType(QualType(), AX->getKeyword(), 12685 AX->isInstantiationDependentType(), 12686 AX->containsUnexpandedParameterPack(), 12687 getCommonDeclChecked(AX->getTypeConstraintConcept(), 12688 AY->getTypeConstraintConcept()), 12689 As); 12690 } 12691 case Type::IncompleteArray: { 12692 const auto *AX = cast<IncompleteArrayType>(X), 12693 *AY = cast<IncompleteArrayType>(Y); 12694 return Ctx.getIncompleteArrayType( 12695 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12696 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12697 } 12698 case Type::DependentSizedArray: { 12699 const auto *AX = cast<DependentSizedArrayType>(X), 12700 *AY = cast<DependentSizedArrayType>(Y); 12701 return Ctx.getDependentSizedArrayType( 12702 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12703 getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY), 12704 getCommonIndexTypeCVRQualifiers(AX, AY), 12705 AX->getBracketsRange() == AY->getBracketsRange() 12706 ? 
AX->getBracketsRange() 12707 : SourceRange()); 12708 } 12709 case Type::ConstantArray: { 12710 const auto *AX = cast<ConstantArrayType>(X), 12711 *AY = cast<ConstantArrayType>(Y); 12712 assert(AX->getSize() == AY->getSize()); 12713 const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) 12714 ? AX->getSizeExpr() 12715 : nullptr; 12716 return Ctx.getConstantArrayType( 12717 getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, 12718 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12719 } 12720 case Type::Atomic: { 12721 const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); 12722 return Ctx.getAtomicType( 12723 Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType())); 12724 } 12725 case Type::Complex: { 12726 const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y); 12727 return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY)); 12728 } 12729 case Type::Pointer: { 12730 const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y); 12731 return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY)); 12732 } 12733 case Type::BlockPointer: { 12734 const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y); 12735 return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY)); 12736 } 12737 case Type::ObjCObjectPointer: { 12738 const auto *PX = cast<ObjCObjectPointerType>(X), 12739 *PY = cast<ObjCObjectPointerType>(Y); 12740 return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY)); 12741 } 12742 case Type::MemberPointer: { 12743 const auto *PX = cast<MemberPointerType>(X), 12744 *PY = cast<MemberPointerType>(Y); 12745 return Ctx.getMemberPointerType( 12746 getCommonPointeeType(Ctx, PX, PY), 12747 Ctx.getCommonSugaredType(QualType(PX->getClass(), 0), 12748 QualType(PY->getClass(), 0)) 12749 .getTypePtr()); 12750 } 12751 case Type::LValueReference: { 12752 const auto *PX = cast<LValueReferenceType>(X), 12753 *PY = cast<LValueReferenceType>(Y); 12754 // FIXME: Preserve PointeeTypeAsWritten. 12755 return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY), 12756 PX->isSpelledAsLValue() || 12757 PY->isSpelledAsLValue()); 12758 } 12759 case Type::RValueReference: { 12760 const auto *PX = cast<RValueReferenceType>(X), 12761 *PY = cast<RValueReferenceType>(Y); 12762 // FIXME: Preserve PointeeTypeAsWritten. 
    return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY));
  }
  case Type::DependentAddressSpace: {
    const auto *PX = cast<DependentAddressSpaceType>(X),
               *PY = cast<DependentAddressSpaceType>(Y);
    assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
    return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY),
                                            PX->getAddrSpaceExpr(),
                                            getCommonAttrLoc(PX, PY));
  }
  case Type::FunctionNoProto: {
    const auto *FX = cast<FunctionNoProtoType>(X),
               *FY = cast<FunctionNoProtoType>(Y);
    assert(FX->getExtInfo() == FY->getExtInfo());
    return Ctx.getFunctionNoProtoType(
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()),
        FX->getExtInfo());
  }
  case Type::FunctionProto: {
    const auto *FX = cast<FunctionProtoType>(X),
               *FY = cast<FunctionProtoType>(Y);
    FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
                                    EPIY = FY->getExtProtoInfo();
    assert(EPIX.ExtInfo == EPIY.ExtInfo);
    assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
    assert(EPIX.RefQualifier == EPIY.RefQualifier);
    assert(EPIX.TypeQuals == EPIY.TypeQuals);
    assert(EPIX.Variadic == EPIY.Variadic);

    // FIXME: Can we handle an empty EllipsisLoc?
    // Use empty EllipsisLoc if X and Y differ.

    EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;

    QualType R =
        Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType());
    auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
                            /*Unqualified=*/true);

    SmallVector<QualType, 8> Exceptions;
    EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
        EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, true);
    return Ctx.getFunctionType(R, P, EPIX);
  }
  case Type::ObjCObject: {
    const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
    assert(
        std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
                   OY->getProtocols().begin(), OY->getProtocols().end(),
                   [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
                     return P0->getCanonicalDecl() == P1->getCanonicalDecl();
                   }) &&
        "protocol lists must be the same");
    auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
                              OY->getTypeArgsAsWritten());
    return Ctx.getObjCObjectType(
        Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs,
        OX->getProtocols(),
        OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
  }
  case Type::ConstantMatrix: {
    const auto *MX = cast<ConstantMatrixType>(X),
               *MY = cast<ConstantMatrixType>(Y);
    assert(MX->getNumRows() == MY->getNumRows());
    assert(MX->getNumColumns() == MY->getNumColumns());
    return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY),
                                     MX->getNumRows(), MX->getNumColumns());
  }
  case Type::DependentSizedMatrix: {
    const auto *MX = cast<DependentSizedMatrixType>(X),
               *MY = cast<DependentSizedMatrixType>(Y);
    assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
    assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
    return Ctx.getDependentSizedMatrixType(
        getCommonElementType(Ctx, MX, MY), MX->getRowExpr(),
        MX->getColumnExpr(), getCommonAttrLoc(MX, MY));
  }
  case Type::Vector: {
    const auto *VX = cast<VectorType>(X), *VY =
cast<VectorType>(Y); 12842 assert(VX->getNumElements() == VY->getNumElements()); 12843 assert(VX->getVectorKind() == VY->getVectorKind()); 12844 return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY), 12845 VX->getNumElements(), VX->getVectorKind()); 12846 } 12847 case Type::ExtVector: { 12848 const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y); 12849 assert(VX->getNumElements() == VY->getNumElements()); 12850 return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY), 12851 VX->getNumElements()); 12852 } 12853 case Type::DependentSizedExtVector: { 12854 const auto *VX = cast<DependentSizedExtVectorType>(X), 12855 *VY = cast<DependentSizedExtVectorType>(Y); 12856 return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY), 12857 getCommonSizeExpr(Ctx, VX, VY), 12858 getCommonAttrLoc(VX, VY)); 12859 } 12860 case Type::DependentVector: { 12861 const auto *VX = cast<DependentVectorType>(X), 12862 *VY = cast<DependentVectorType>(Y); 12863 assert(VX->getVectorKind() == VY->getVectorKind()); 12864 return Ctx.getDependentVectorType( 12865 getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY), 12866 getCommonAttrLoc(VX, VY), VX->getVectorKind()); 12867 } 12868 case Type::InjectedClassName: { 12869 const auto *IX = cast<InjectedClassNameType>(X), 12870 *IY = cast<InjectedClassNameType>(Y); 12871 return Ctx.getInjectedClassNameType( 12872 getCommonDeclChecked(IX->getDecl(), IY->getDecl()), 12873 Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(), 12874 IY->getInjectedSpecializationType())); 12875 } 12876 case Type::TemplateSpecialization: { 12877 const auto *TX = cast<TemplateSpecializationType>(X), 12878 *TY = cast<TemplateSpecializationType>(Y); 12879 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12880 TY->template_arguments()); 12881 return Ctx.getTemplateSpecializationType( 12882 ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(), 12883 TY->getTemplateName()), 12884 As, X->getCanonicalTypeInternal()); 12885 } 12886 case Type::Decltype: { 12887 const auto *DX = cast<DecltypeType>(X); 12888 [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y); 12889 assert(DX->isDependentType()); 12890 assert(DY->isDependentType()); 12891 assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr())); 12892 // As Decltype is not uniqued, building a common type would be wasteful. 
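    // (The asserts above guarantee both sides spell the same dependent
    // expression, so X's node is an acceptable representative of either.)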
12893 return QualType(DX, 0); 12894 } 12895 case Type::DependentName: { 12896 const auto *NX = cast<DependentNameType>(X), 12897 *NY = cast<DependentNameType>(Y); 12898 assert(NX->getIdentifier() == NY->getIdentifier()); 12899 return Ctx.getDependentNameType( 12900 getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY), 12901 NX->getIdentifier(), NX->getCanonicalTypeInternal()); 12902 } 12903 case Type::DependentTemplateSpecialization: { 12904 const auto *TX = cast<DependentTemplateSpecializationType>(X), 12905 *TY = cast<DependentTemplateSpecializationType>(Y); 12906 assert(TX->getIdentifier() == TY->getIdentifier()); 12907 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12908 TY->template_arguments()); 12909 return Ctx.getDependentTemplateSpecializationType( 12910 getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY), 12911 TX->getIdentifier(), As); 12912 } 12913 case Type::UnaryTransform: { 12914 const auto *TX = cast<UnaryTransformType>(X), 12915 *TY = cast<UnaryTransformType>(Y); 12916 assert(TX->getUTTKind() == TY->getUTTKind()); 12917 return Ctx.getUnaryTransformType( 12918 Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()), 12919 Ctx.getCommonSugaredType(TX->getUnderlyingType(), 12920 TY->getUnderlyingType()), 12921 TX->getUTTKind()); 12922 } 12923 case Type::PackExpansion: { 12924 const auto *PX = cast<PackExpansionType>(X), 12925 *PY = cast<PackExpansionType>(Y); 12926 assert(PX->getNumExpansions() == PY->getNumExpansions()); 12927 return Ctx.getPackExpansionType( 12928 Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()), 12929 PX->getNumExpansions(), false); 12930 } 12931 case Type::Pipe: { 12932 const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y); 12933 assert(PX->isReadOnly() == PY->isReadOnly()); 12934 auto MP = PX->isReadOnly() ? 
&ASTContext::getReadPipeType 12935 : &ASTContext::getWritePipeType; 12936 return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY)); 12937 } 12938 case Type::TemplateTypeParm: { 12939 const auto *TX = cast<TemplateTypeParmType>(X), 12940 *TY = cast<TemplateTypeParmType>(Y); 12941 assert(TX->getDepth() == TY->getDepth()); 12942 assert(TX->getIndex() == TY->getIndex()); 12943 assert(TX->isParameterPack() == TY->isParameterPack()); 12944 return Ctx.getTemplateTypeParmType( 12945 TX->getDepth(), TX->getIndex(), TX->isParameterPack(), 12946 getCommonDecl(TX->getDecl(), TY->getDecl())); 12947 } 12948 } 12949 llvm_unreachable("Unknown Type Class"); 12950 } 12951 12952 static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, 12953 const Type *Y, 12954 SplitQualType Underlying) { 12955 Type::TypeClass TC = X->getTypeClass(); 12956 if (TC != Y->getTypeClass()) 12957 return QualType(); 12958 switch (TC) { 12959 #define UNEXPECTED_TYPE(Class, Kind) \ 12960 case Type::Class: \ 12961 llvm_unreachable("Unexpected " Kind ": " #Class); 12962 #define TYPE(Class, Base) 12963 #define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent") 12964 #include "clang/AST/TypeNodes.inc" 12965 12966 #define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical") 12967 CANONICAL_TYPE(Atomic) 12968 CANONICAL_TYPE(BitInt) 12969 CANONICAL_TYPE(BlockPointer) 12970 CANONICAL_TYPE(Builtin) 12971 CANONICAL_TYPE(Complex) 12972 CANONICAL_TYPE(ConstantArray) 12973 CANONICAL_TYPE(ConstantMatrix) 12974 CANONICAL_TYPE(Enum) 12975 CANONICAL_TYPE(ExtVector) 12976 CANONICAL_TYPE(FunctionNoProto) 12977 CANONICAL_TYPE(FunctionProto) 12978 CANONICAL_TYPE(IncompleteArray) 12979 CANONICAL_TYPE(LValueReference) 12980 CANONICAL_TYPE(MemberPointer) 12981 CANONICAL_TYPE(ObjCInterface) 12982 CANONICAL_TYPE(ObjCObject) 12983 CANONICAL_TYPE(ObjCObjectPointer) 12984 CANONICAL_TYPE(Pipe) 12985 CANONICAL_TYPE(Pointer) 12986 CANONICAL_TYPE(Record) 12987 CANONICAL_TYPE(RValueReference) 12988 CANONICAL_TYPE(VariableArray) 12989 CANONICAL_TYPE(Vector) 12990 #undef CANONICAL_TYPE 12991 12992 #undef UNEXPECTED_TYPE 12993 12994 case Type::Adjusted: { 12995 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); 12996 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); 12997 if (!Ctx.hasSameType(OX, OY)) 12998 return QualType(); 12999 // FIXME: It's inefficient to have to unify the original types. 13000 return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), 13001 Ctx.getQualifiedType(Underlying)); 13002 } 13003 case Type::Decayed: { 13004 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); 13005 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); 13006 if (!Ctx.hasSameType(OX, OY)) 13007 return QualType(); 13008 // FIXME: It's inefficient to have to unify the original types. 13009 return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY), 13010 Ctx.getQualifiedType(Underlying)); 13011 } 13012 case Type::Attributed: { 13013 const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y); 13014 AttributedType::Kind Kind = AX->getAttrKind(); 13015 if (Kind != AY->getAttrKind()) 13016 return QualType(); 13017 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType(); 13018 if (!Ctx.hasSameType(MX, MY)) 13019 return QualType(); 13020 // FIXME: It's inefficient to have to unify the modified types. 
13021 return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY), 13022 Ctx.getQualifiedType(Underlying)); 13023 } 13024 case Type::BTFTagAttributed: { 13025 const auto *BX = cast<BTFTagAttributedType>(X); 13026 const BTFTypeTagAttr *AX = BX->getAttr(); 13027 // The attribute is not uniqued, so just compare the tag. 13028 if (AX->getBTFTypeTag() != 13029 cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag()) 13030 return QualType(); 13031 return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying)); 13032 } 13033 case Type::Auto: { 13034 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 13035 13036 AutoTypeKeyword KW = AX->getKeyword(); 13037 if (KW != AY->getKeyword()) 13038 return QualType(); 13039 13040 ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(), 13041 AY->getTypeConstraintConcept()); 13042 SmallVector<TemplateArgument, 8> As; 13043 if (CD && 13044 getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(), 13045 AY->getTypeConstraintArguments())) { 13046 CD = nullptr; // The arguments differ, so make it unconstrained. 13047 As.clear(); 13048 } 13049 13050 // Both auto types can't be dependent, otherwise they wouldn't have been 13051 // sugar. This implies they can't contain unexpanded packs either. 13052 return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), 13053 /*IsDependent=*/false, /*IsPack=*/false, CD, As); 13054 } 13055 case Type::Decltype: 13056 return QualType(); 13057 case Type::DeducedTemplateSpecialization: 13058 // FIXME: Try to merge these. 13059 return QualType(); 13060 13061 case Type::Elaborated: { 13062 const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y); 13063 return Ctx.getElaboratedType( 13064 ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY), 13065 Ctx.getQualifiedType(Underlying), 13066 ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl())); 13067 } 13068 case Type::MacroQualified: { 13069 const auto *MX = cast<MacroQualifiedType>(X), 13070 *MY = cast<MacroQualifiedType>(Y); 13071 const IdentifierInfo *IX = MX->getMacroIdentifier(); 13072 if (IX != MY->getMacroIdentifier()) 13073 return QualType(); 13074 return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX); 13075 } 13076 case Type::SubstTemplateTypeParm: { 13077 const auto *SX = cast<SubstTemplateTypeParmType>(X), 13078 *SY = cast<SubstTemplateTypeParmType>(Y); 13079 Decl *CD = 13080 ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl()); 13081 if (!CD) 13082 return QualType(); 13083 unsigned Index = SX->getIndex(); 13084 if (Index != SY->getIndex()) 13085 return QualType(); 13086 auto PackIndex = SX->getPackIndex(); 13087 if (PackIndex != SY->getPackIndex()) 13088 return QualType(); 13089 return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying), 13090 CD, Index, PackIndex); 13091 } 13092 case Type::ObjCTypeParam: 13093 // FIXME: Try to merge these. 
13094 return QualType(); 13095 case Type::Paren: 13096 return Ctx.getParenType(Ctx.getQualifiedType(Underlying)); 13097 13098 case Type::TemplateSpecialization: { 13099 const auto *TX = cast<TemplateSpecializationType>(X), 13100 *TY = cast<TemplateSpecializationType>(Y); 13101 TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(), 13102 TY->getTemplateName()); 13103 if (!CTN.getAsVoidPointer()) 13104 return QualType(); 13105 SmallVector<TemplateArgument, 8> Args; 13106 if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(), 13107 TY->template_arguments())) 13108 return QualType(); 13109 return Ctx.getTemplateSpecializationType(CTN, Args, 13110 Ctx.getQualifiedType(Underlying)); 13111 } 13112 case Type::Typedef: { 13113 const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y); 13114 const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl()); 13115 if (!CD) 13116 return QualType(); 13117 return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying)); 13118 } 13119 case Type::TypeOf: { 13120 // The common sugar between two typeof expressions, where one is 13121 // potentially a typeof_unqual and the other is not, we unify to the 13122 // qualified type as that retains the most information along with the type. 13123 // We only return a typeof_unqual type when both types are unqual types. 13124 TypeOfKind Kind = TypeOfKind::Qualified; 13125 if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() && 13126 cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified) 13127 Kind = TypeOfKind::Unqualified; 13128 return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind); 13129 } 13130 case Type::TypeOfExpr: 13131 return QualType(); 13132 13133 case Type::UnaryTransform: { 13134 const auto *UX = cast<UnaryTransformType>(X), 13135 *UY = cast<UnaryTransformType>(Y); 13136 UnaryTransformType::UTTKind KX = UX->getUTTKind(); 13137 if (KX != UY->getUTTKind()) 13138 return QualType(); 13139 QualType BX = UX->getBaseType(), BY = UY->getBaseType(); 13140 if (!Ctx.hasSameType(BX, BY)) 13141 return QualType(); 13142 // FIXME: It's inefficient to have to unify the base types. 13143 return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY), 13144 Ctx.getQualifiedType(Underlying), KX); 13145 } 13146 case Type::Using: { 13147 const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y); 13148 const UsingShadowDecl *CD = 13149 ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl()); 13150 if (!CD) 13151 return QualType(); 13152 return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying)); 13153 } 13154 } 13155 llvm_unreachable("Unhandled Type Class"); 13156 } 13157 13158 static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) { 13159 SmallVector<SplitQualType, 8> R; 13160 while (true) { 13161 QTotal.addConsistentQualifiers(T.Quals); 13162 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType(); 13163 if (NT == QualType(T.Ty, 0)) 13164 break; 13165 R.push_back(T); 13166 T = NT.split(); 13167 } 13168 return R; 13169 } 13170 13171 QualType ASTContext::getCommonSugaredType(QualType X, QualType Y, 13172 bool Unqualified) { 13173 assert(Unqualified ? 
hasSameUnqualifiedType(X, Y) : hasSameType(X, Y)); 13174 if (X == Y) 13175 return X; 13176 if (!Unqualified) { 13177 if (X.isCanonical()) 13178 return X; 13179 if (Y.isCanonical()) 13180 return Y; 13181 } 13182 13183 SplitQualType SX = X.split(), SY = Y.split(); 13184 Qualifiers QX, QY; 13185 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys, 13186 // until we reach their underlying "canonical nodes". Note these are not 13187 // necessarily canonical types, as they may still have sugared properties. 13188 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively. 13189 auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY); 13190 if (SX.Ty != SY.Ty) { 13191 // The canonical nodes differ. Build a common canonical node out of the two, 13192 // unifying their sugar. This may recurse back here. 13193 SX.Ty = 13194 ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr(); 13195 } else { 13196 // The canonical nodes were identical: We may have desugared too much. 13197 // Add any common sugar back in. 13198 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) { 13199 QX -= SX.Quals; 13200 QY -= SY.Quals; 13201 SX = Xs.pop_back_val(); 13202 SY = Ys.pop_back_val(); 13203 } 13204 } 13205 if (Unqualified) 13206 QX = Qualifiers::removeCommonQualifiers(QX, QY); 13207 else 13208 assert(QX == QY); 13209 13210 // Even though the remaining sugar nodes in Xs and Ys differ, some may be 13211 // related. Walk up these nodes, unifying them and adding the result. 13212 while (!Xs.empty() && !Ys.empty()) { 13213 auto Underlying = SplitQualType( 13214 SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals)); 13215 SX = Xs.pop_back_val(); 13216 SY = Ys.pop_back_val(); 13217 SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying) 13218 .getTypePtrOrNull(); 13219 // Stop at the first pair which is unrelated. 13220 if (!SX.Ty) { 13221 SX.Ty = Underlying.Ty; 13222 break; 13223 } 13224 QX -= Underlying.Quals; 13225 }; 13226 13227 // Add back the missing accumulated qualifiers, which were stripped off 13228 // with the sugar nodes we could not unify. 13229 QualType R = getQualifiedType(SX.Ty, QX); 13230 assert(Unqualified ? 
hasSameUnqualifiedType(R, X) : hasSameType(R, X)); 13231 return R; 13232 } 13233 13234 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 13235 assert(Ty->isFixedPointType()); 13236 13237 if (Ty->isSaturatedFixedPointType()) return Ty; 13238 13239 switch (Ty->castAs<BuiltinType>()->getKind()) { 13240 default: 13241 llvm_unreachable("Not a fixed point type!"); 13242 case BuiltinType::ShortAccum: 13243 return SatShortAccumTy; 13244 case BuiltinType::Accum: 13245 return SatAccumTy; 13246 case BuiltinType::LongAccum: 13247 return SatLongAccumTy; 13248 case BuiltinType::UShortAccum: 13249 return SatUnsignedShortAccumTy; 13250 case BuiltinType::UAccum: 13251 return SatUnsignedAccumTy; 13252 case BuiltinType::ULongAccum: 13253 return SatUnsignedLongAccumTy; 13254 case BuiltinType::ShortFract: 13255 return SatShortFractTy; 13256 case BuiltinType::Fract: 13257 return SatFractTy; 13258 case BuiltinType::LongFract: 13259 return SatLongFractTy; 13260 case BuiltinType::UShortFract: 13261 return SatUnsignedShortFractTy; 13262 case BuiltinType::UFract: 13263 return SatUnsignedFractTy; 13264 case BuiltinType::ULongFract: 13265 return SatUnsignedLongFractTy; 13266 } 13267 } 13268 13269 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 13270 if (LangOpts.OpenCL) 13271 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 13272 13273 if (LangOpts.CUDA) 13274 return getTargetInfo().getCUDABuiltinAddressSpace(AS); 13275 13276 return getLangASFromTargetAS(AS); 13277 } 13278 13279 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 13280 // doesn't include ASTContext.h 13281 template 13282 clang::LazyGenerationalUpdatePtr< 13283 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 13284 clang::LazyGenerationalUpdatePtr< 13285 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 13286 const clang::ASTContext &Ctx, Decl *Value); 13287 13288 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 13289 assert(Ty->isFixedPointType()); 13290 13291 const TargetInfo &Target = getTargetInfo(); 13292 switch (Ty->castAs<BuiltinType>()->getKind()) { 13293 default: 13294 llvm_unreachable("Not a fixed point type!"); 13295 case BuiltinType::ShortAccum: 13296 case BuiltinType::SatShortAccum: 13297 return Target.getShortAccumScale(); 13298 case BuiltinType::Accum: 13299 case BuiltinType::SatAccum: 13300 return Target.getAccumScale(); 13301 case BuiltinType::LongAccum: 13302 case BuiltinType::SatLongAccum: 13303 return Target.getLongAccumScale(); 13304 case BuiltinType::UShortAccum: 13305 case BuiltinType::SatUShortAccum: 13306 return Target.getUnsignedShortAccumScale(); 13307 case BuiltinType::UAccum: 13308 case BuiltinType::SatUAccum: 13309 return Target.getUnsignedAccumScale(); 13310 case BuiltinType::ULongAccum: 13311 case BuiltinType::SatULongAccum: 13312 return Target.getUnsignedLongAccumScale(); 13313 case BuiltinType::ShortFract: 13314 case BuiltinType::SatShortFract: 13315 return Target.getShortFractScale(); 13316 case BuiltinType::Fract: 13317 case BuiltinType::SatFract: 13318 return Target.getFractScale(); 13319 case BuiltinType::LongFract: 13320 case BuiltinType::SatLongFract: 13321 return Target.getLongFractScale(); 13322 case BuiltinType::UShortFract: 13323 case BuiltinType::SatUShortFract: 13324 return Target.getUnsignedShortFractScale(); 13325 case BuiltinType::UFract: 13326 case BuiltinType::SatUFract: 13327 return Target.getUnsignedFractScale(); 13328 case BuiltinType::ULongFract: 13329 
case BuiltinType::SatULongFract: 13330 return Target.getUnsignedLongFractScale(); 13331 } 13332 } 13333 13334 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 13335 assert(Ty->isFixedPointType()); 13336 13337 const TargetInfo &Target = getTargetInfo(); 13338 switch (Ty->castAs<BuiltinType>()->getKind()) { 13339 default: 13340 llvm_unreachable("Not a fixed point type!"); 13341 case BuiltinType::ShortAccum: 13342 case BuiltinType::SatShortAccum: 13343 return Target.getShortAccumIBits(); 13344 case BuiltinType::Accum: 13345 case BuiltinType::SatAccum: 13346 return Target.getAccumIBits(); 13347 case BuiltinType::LongAccum: 13348 case BuiltinType::SatLongAccum: 13349 return Target.getLongAccumIBits(); 13350 case BuiltinType::UShortAccum: 13351 case BuiltinType::SatUShortAccum: 13352 return Target.getUnsignedShortAccumIBits(); 13353 case BuiltinType::UAccum: 13354 case BuiltinType::SatUAccum: 13355 return Target.getUnsignedAccumIBits(); 13356 case BuiltinType::ULongAccum: 13357 case BuiltinType::SatULongAccum: 13358 return Target.getUnsignedLongAccumIBits(); 13359 case BuiltinType::ShortFract: 13360 case BuiltinType::SatShortFract: 13361 case BuiltinType::Fract: 13362 case BuiltinType::SatFract: 13363 case BuiltinType::LongFract: 13364 case BuiltinType::SatLongFract: 13365 case BuiltinType::UShortFract: 13366 case BuiltinType::SatUShortFract: 13367 case BuiltinType::UFract: 13368 case BuiltinType::SatUFract: 13369 case BuiltinType::ULongFract: 13370 case BuiltinType::SatULongFract: 13371 return 0; 13372 } 13373 } 13374 13375 llvm::FixedPointSemantics 13376 ASTContext::getFixedPointSemantics(QualType Ty) const { 13377 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 13378 "Can only get the fixed point semantics for a " 13379 "fixed point or integer type."); 13380 if (Ty->isIntegerType()) 13381 return llvm::FixedPointSemantics::GetIntegerSemantics( 13382 getIntWidth(Ty), Ty->isSignedIntegerType()); 13383 13384 bool isSigned = Ty->isSignedFixedPointType(); 13385 return llvm::FixedPointSemantics( 13386 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 13387 Ty->isSaturatedFixedPointType(), 13388 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 13389 } 13390 13391 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 13392 assert(Ty->isFixedPointType()); 13393 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 13394 } 13395 13396 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 13397 assert(Ty->isFixedPointType()); 13398 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 13399 } 13400 13401 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 13402 assert(Ty->isUnsignedFixedPointType() && 13403 "Expected unsigned fixed point type"); 13404 13405 switch (Ty->castAs<BuiltinType>()->getKind()) { 13406 case BuiltinType::UShortAccum: 13407 return ShortAccumTy; 13408 case BuiltinType::UAccum: 13409 return AccumTy; 13410 case BuiltinType::ULongAccum: 13411 return LongAccumTy; 13412 case BuiltinType::SatUShortAccum: 13413 return SatShortAccumTy; 13414 case BuiltinType::SatUAccum: 13415 return SatAccumTy; 13416 case BuiltinType::SatULongAccum: 13417 return SatLongAccumTy; 13418 case BuiltinType::UShortFract: 13419 return ShortFractTy; 13420 case BuiltinType::UFract: 13421 return FractTy; 13422 case BuiltinType::ULongFract: 13423 return LongFractTy; 13424 case BuiltinType::SatUShortFract: 13425 return SatShortFractTy; 13426 case BuiltinType::SatUFract: 13427 
return SatFractTy; 13428 case BuiltinType::SatULongFract: 13429 return SatLongFractTy; 13430 default: 13431 llvm_unreachable("Unexpected unsigned fixed point type"); 13432 } 13433 } 13434 13435 std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs( 13436 const TargetVersionAttr *TV) const { 13437 assert(TV != nullptr); 13438 llvm::SmallVector<StringRef, 8> Feats; 13439 std::vector<std::string> ResFeats; 13440 TV->getFeatures(Feats); 13441 for (auto &Feature : Feats) 13442 if (Target->validateCpuSupports(Feature.str())) 13443 // Use '?' to mark features that came from TargetVersion. 13444 ResFeats.push_back("?" + Feature.str()); 13445 return ResFeats; 13446 } 13447 13448 ParsedTargetAttr 13449 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 13450 assert(TD != nullptr); 13451 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr()); 13452 13453 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 13454 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 13455 }); 13456 return ParsedAttr; 13457 } 13458 13459 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13460 const FunctionDecl *FD) const { 13461 if (FD) 13462 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 13463 else 13464 Target->initFeatureMap(FeatureMap, getDiagnostics(), 13465 Target->getTargetOpts().CPU, 13466 Target->getTargetOpts().Features); 13467 } 13468 13469 // Fills in the supplied string map with the set of target features for the 13470 // passed in function. 13471 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13472 GlobalDecl GD) const { 13473 StringRef TargetCPU = Target->getTargetOpts().CPU; 13474 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 13475 if (const auto *TD = FD->getAttr<TargetAttr>()) { 13476 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 13477 13478 // Make a copy of the features as passed on the command line into the 13479 // beginning of the additional features from the function to override. 13480 ParsedAttr.Features.insert( 13481 ParsedAttr.Features.begin(), 13482 Target->getTargetOpts().FeaturesAsWritten.begin(), 13483 Target->getTargetOpts().FeaturesAsWritten.end()); 13484 13485 if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) 13486 TargetCPU = ParsedAttr.CPU; 13487 13488 // Now populate the feature map, first with the TargetCPU which is either 13489 // the default or a new one from the target attribute string. Then we'll use 13490 // the passed in features (FeaturesAsWritten) along with the new ones from 13491 // the attribute. 
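    // For example (illustrative, x86): given
    //   __attribute__((target("arch=skylake,avx2"))) void f();
    // ParsedAttr.CPU is "skylake" and "+avx2" ends up after the command-line
    // features, so the attribute's features take precedence.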
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    std::vector<std::string> Features;
    StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
    if (Target->getTriple().isAArch64()) {
      // TargetClones for AArch64
      if (VersionStr != "default") {
        SmallVector<StringRef, 1> VersionFeatures;
        VersionStr.split(VersionFeatures, "+");
        for (auto &VFeature : VersionFeatures) {
          VFeature = VFeature.trim();
          // Use '?' to mark features that came from AArch64 TargetClones.
          Features.push_back((StringRef{"?"} + VFeature).str());
        }
      }
      Features.insert(Features.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.end());
    } else {
      if (VersionStr.starts_with("arch="))
        TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back((StringRef{"+"} + VersionStr).str());
    }
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
    Feats.insert(Feats.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalize(const Decl *D) const {
  bool IsInternalVar =
      isa<VarDecl>(D) &&
      basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: managed variables need to be externalized since they are
  // declarations in IR and therefore cannot have internal linkage. Kernels in
  // an anonymous namespace need to be externalized to avoid duplicate symbols.
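  // For example (illustrative): a file-scope 'static __device__ int Counter;'
  // is an internal-linkage variable with an explicit device attribute, so it
  // may be externalized (and given a distinct name, e.g. using the CUID hash
  // computed below).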
13562 return (IsInternalVar && 13563 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 13564 (D->hasAttr<CUDAGlobalAttr>() && 13565 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 13566 GVA_Internal); 13567 } 13568 13569 bool ASTContext::shouldExternalize(const Decl *D) const { 13570 return mayExternalize(D) && 13571 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 13572 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 13573 } 13574 13575 StringRef ASTContext::getCUIDHash() const { 13576 if (!CUIDHash.empty()) 13577 return CUIDHash; 13578 if (LangOpts.CUID.empty()) 13579 return StringRef(); 13580 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 13581 return CUIDHash; 13582 } 13583