//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
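/// For example, for 'typedef struct X {} Y;' the relevant location is the
/// beginning of the typedef, so a documentation comment written before
/// 'typedef' can be associated with 'Y' despite the intervening braces.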
static SourceLocation getDeclLocForCommentSearch(const Decl *D,
                                                 SourceManager &SourceMgr) {
  assert(D);

  // User cannot attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User cannot attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not a definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    return D->getBeginLoc();

  const SourceLocation DeclLoc = D->getLocation();
  if (DeclLoc.isMacroID()) {
    if (isa<TypedefDecl>(D)) {
      // If the location of the typedef name is in a macro, it is because the
      // typedef is being declared via a macro. Try using the declaration's
      // starting location as the "declaration location".
      return D->getBeginLoc();
    }

    if (const auto *TD = dyn_cast<TagDecl>(D)) {
      // If the location of the tag decl is inside a macro, but the spelling of
      // the tag name comes from a macro argument, it looks like a special
      // macro like NS_ENUM is being used to define the tag decl.  In that
      // case, adjust the source location to the expansion loc so that we can
      // attach the comment to the tag decl.
      if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition())
        return SourceMgr.getExpansionLoc(DeclLoc);
    }
  }

  return DeclLoc;
}
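
/// Attempts to find a documentation comment for \p D among the comments of the
/// file containing \p RepresentativeLocForDecl: for certain declaration kinds,
/// a trailing comment starting on the same line as the declaration is used;
/// otherwise the closest preceding non-trailing doc comment is used, provided
/// no other declaration or preprocessor directive appears between that comment
/// and the declaration.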
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that the Doxygen trailing comment comes after the declaration,
      // starts on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return nullptr;

  if (ExternalSource && !CommentsLoaded) {
    ExternalSource->ReadComments();
    CommentsLoaded = true;
  }

  if (Comments.empty())
    return nullptr;

  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty())
    return nullptr;

  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
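/// For example, if 'D' is an implicit instantiation such as f<int> of
///   template <typename T> void f(T);
/// the returned declaration is the FunctionTemplateDecl for 'f', so a comment
/// written on the template can be found for the instantiation.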
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalid.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
    if (LookupRes != CommentlessRedeclChains.end())
      return LookupRes->second;
    return nullptr;
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.

  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (DeclRawComments.count(D) > 0)
      continue;

    if (RawComment *const DocComment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
      cacheRawCommentForDecl(*D, *DocComment);
      comments::FullComment *FC = DocComment->parse(*this, PP, D);
      ParsedComments[D->getCanonicalDecl()] = FC;
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D, const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl. This is
  // important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      const TypeConstraint *TC = TTP->getTypeConstraint();
      ID.AddBoolean(TC != nullptr);
      if (TC)
        TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
                                                        /*Canonical=*/true);
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
  Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
  ID.AddBoolean(RequiresClause != nullptr);
  if (RequiresClause)
    RequiresClause->Profile(ID, C, /*Canonical=*/true);
}

static Expr *
canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
                                          QualType ConstrainedType) {
  // This is a bit ugly - we need to form a new immediately-declared
  // constraint that references the new parameter; this would ideally
  // require semantic analysis (e.g. template<C T> struct S {}; - the
  // converted arguments of C<T> could be an argument pack if C is
  // declared as template<typename... T> concept C = ...).
  // We don't have semantic analysis here so we dig deep into the
  // ready-made constraint expr and change the thing manually.
  ConceptSpecializationExpr *CSE;
  if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
    CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
  else
    CSE = cast<ConceptSpecializationExpr>(IDC);
  ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
  SmallVector<TemplateArgument, 3> NewConverted;
  NewConverted.reserve(OldConverted.size());
  if (OldConverted.front().getKind() == TemplateArgument::Pack) {
    // The case:
    //   template<typename... T> concept C = true;
    //   template<C<int> T> struct S; -> constraint is C<{T, int}>
    NewConverted.push_back(ConstrainedType);
    for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
      NewConverted.push_back(Arg);
    TemplateArgument NewPack(NewConverted);

    NewConverted.clear();
    NewConverted.push_back(NewPack);
    assert(OldConverted.size() == 1 &&
           "Template parameter pack should be the last parameter");
  } else {
    assert(OldConverted.front().getKind() == TemplateArgument::Type &&
           "Unexpected first argument kind for immediately-declared "
           "constraint");
    NewConverted.push_back(ConstrainedType);
    for (auto &Arg : OldConverted.drop_front(1))
      NewConverted.push_back(Arg);
  }
  Expr *NewIDC = ConceptSpecializationExpr::Create(
      C, CSE->getNamedConcept(), NewConverted, nullptr,
      CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());

  if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
    NewIDC = new (C) CXXFoldExpr(
        OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC,
        BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
        SourceLocation(), /*NumExpansions=*/None);
  return NewIDC;
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                          PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this,
          getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), TTP->hasTypeConstraint(),
          TTP->isExpandedParameterPack() ?
          llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
      if (const auto *TC = TTP->getTypeConstraint()) {
        QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
        Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
            *this, TC->getImmediatelyDeclaredConstraint(),
            ParamAsArgument);
        TemplateArgumentListInfo CanonArgsAsWritten;
        if (auto *Args = TC->getTemplateArgsAsWritten())
          for (const auto &ArgLoc : Args->arguments())
            CanonArgsAsWritten.addArgument(
                TemplateArgumentLoc(ArgLoc.getArgument(),
                                    TemplateArgumentLocInfo()));
        NewTTP->setTypeConstraint(
            NestedNameSpecifierLoc(),
            DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
                                SourceLocation()), /*FoundDecl=*/nullptr,
            // Actually canonicalizing a TemplateArgumentLoc is difficult so we
            // simply omit the ArgsAsWritten
            TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
      }
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      if (AutoType *AT = T->getContainedAutoType()) {
        if (AT->isConstrained()) {
          Param->setPlaceholderTypeConstraint(
              canonicalizeImmediatelyDeclaredConstraint(
                  *this, NTTP->getPlaceholderTypeConstraint(), T));
        }
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
                              cast<TemplateTemplateParmDecl>(*P)));
  }

  Expr *CanonRequiresClause = nullptr;
  if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
    CanonRequiresClause = RequiresClause;

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       nullptr,
                                       TemplateParameterList::Create(*this, SourceLocation(),
                                                                     SourceLocation(),
                                                                     CanonParams,
                                                                     SourceLocation(),
                                                                     CanonRequiresClause));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.getValueOr(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
                                           const LangOptions &LOpts) {
  if (LOpts.FakeAddressSpaceMap) {
    // The fake address space map must have a distinct entry for each
    // language-specific address space.
    static const unsigned FakeAddrSpaceMap[] = {
        0,  // Default
        1,  // opencl_global
        3,  // opencl_local
        2,  // opencl_constant
        0,  // opencl_private
        4,  // opencl_generic
        5,  // opencl_global_device
        6,  // opencl_global_host
        7,  // cuda_device
        8,  // cuda_constant
        9,  // cuda_shared
        1,  // sycl_global
        5,  // sycl_global_device
        6,  // sycl_global_host
        3,  // sycl_local
        0,  // sycl_private
        10, // ptr32_sptr
        11, // ptr32_uptr
        12  // ptr64
    };
    return &FakeAddrSpaceMap;
  } else {
    return &T.getAddressSpaceMap();
  }
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
         const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                    AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
      0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                                     \
  if (counts[Idx])                                                             \
    llvm::errs() << "    " << counts[Idx] << " " << #Name                      \
                 << " types, " << sizeof(Name##Type) << " each "               \
                 << "(" << counts[Idx] * sizeof(Name##Type)                    \
                 << " bytes)\n";                                               \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                              \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase_value(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return None;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return None;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  auto Pos = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
  if (Pos == InstantiatedFromUsingEnumDecl.end())
    return nullptr;

  return Pos->second;
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
    = InstantiatedFromUnnamedFieldDecl.find(Field);
  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
InstantiatedFromUnnamedFieldDecl.end()) 1617 return nullptr; 1618 1619 return Pos->second; 1620 } 1621 1622 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1623 FieldDecl *Tmpl) { 1624 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1625 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1626 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1627 "Already noted what unnamed field was instantiated from"); 1628 1629 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1630 } 1631 1632 ASTContext::overridden_cxx_method_iterator 1633 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1634 return overridden_methods(Method).begin(); 1635 } 1636 1637 ASTContext::overridden_cxx_method_iterator 1638 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1639 return overridden_methods(Method).end(); 1640 } 1641 1642 unsigned 1643 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1644 auto Range = overridden_methods(Method); 1645 return Range.end() - Range.begin(); 1646 } 1647 1648 ASTContext::overridden_method_range 1649 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1650 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1651 OverriddenMethods.find(Method->getCanonicalDecl()); 1652 if (Pos == OverriddenMethods.end()) 1653 return overridden_method_range(nullptr, nullptr); 1654 return overridden_method_range(Pos->second.begin(), Pos->second.end()); 1655 } 1656 1657 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1658 const CXXMethodDecl *Overridden) { 1659 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1660 OverriddenMethods[Method].push_back(Overridden); 1661 } 1662 1663 void ASTContext::getOverriddenMethods( 1664 const NamedDecl *D, 1665 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1666 assert(D); 1667 1668 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1669 Overridden.append(overridden_methods_begin(CXXMethod), 1670 overridden_methods_end(CXXMethod)); 1671 return; 1672 } 1673 1674 const auto *Method = dyn_cast<ObjCMethodDecl>(D); 1675 if (!Method) 1676 return; 1677 1678 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1679 Method->getOverriddenMethods(OverDecls); 1680 Overridden.append(OverDecls.begin(), OverDecls.end()); 1681 } 1682 1683 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1684 assert(!Import->getNextLocalImport() && 1685 "Import declaration already in the chain"); 1686 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1687 if (!FirstLocalImport) { 1688 FirstLocalImport = Import; 1689 LastLocalImport = Import; 1690 return; 1691 } 1692 1693 LastLocalImport->setNextLocalImport(Import); 1694 LastLocalImport = Import; 1695 } 1696 1697 //===----------------------------------------------------------------------===// 1698 // Type Sizing and Analysis 1699 //===----------------------------------------------------------------------===// 1700 1701 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1702 /// scalar floating point type. 
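/// For example (illustrative usage sketch, not code from this file; 'Ctx'
/// stands for an ASTContext reference):
/// \code
///   const llvm::fltSemantics &Sem = Ctx.getFloatTypeSemantics(Ctx.DoubleTy);
///   unsigned Precision = llvm::APFloat::semanticsPrecision(Sem); // 53 for IEEE double
/// \endcode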
1703 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { 1704 switch (T->castAs<BuiltinType>()->getKind()) { 1705 default: 1706 llvm_unreachable("Not a floating point type!"); 1707 case BuiltinType::BFloat16: 1708 return Target->getBFloat16Format(); 1709 case BuiltinType::Float16: 1710 case BuiltinType::Half: 1711 return Target->getHalfFormat(); 1712 case BuiltinType::Float: return Target->getFloatFormat(); 1713 case BuiltinType::Double: return Target->getDoubleFormat(); 1714 case BuiltinType::Ibm128: 1715 return Target->getIbm128Format(); 1716 case BuiltinType::LongDouble: 1717 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1718 return AuxTarget->getLongDoubleFormat(); 1719 return Target->getLongDoubleFormat(); 1720 case BuiltinType::Float128: 1721 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1722 return AuxTarget->getFloat128Format(); 1723 return Target->getFloat128Format(); 1724 } 1725 } 1726 1727 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { 1728 unsigned Align = Target->getCharWidth(); 1729 1730 bool UseAlignAttrOnly = false; 1731 if (unsigned AlignFromAttr = D->getMaxAlignment()) { 1732 Align = AlignFromAttr; 1733 1734 // __attribute__((aligned)) can increase or decrease alignment 1735 // *except* on a struct or struct member, where it only increases 1736 // alignment unless 'packed' is also specified. 1737 // 1738 // It is an error for alignas to decrease alignment, so we can 1739 // ignore that possibility; Sema should diagnose it. 1740 if (isa<FieldDecl>(D)) { 1741 UseAlignAttrOnly = D->hasAttr<PackedAttr>() || 1742 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1743 } else { 1744 UseAlignAttrOnly = true; 1745 } 1746 } 1747 else if (isa<FieldDecl>(D)) 1748 UseAlignAttrOnly = 1749 D->hasAttr<PackedAttr>() || 1750 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1751 1752 // If we're using the align attribute only, just ignore everything 1753 // else about the declaration and its type. 1754 if (UseAlignAttrOnly) { 1755 // do nothing 1756 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) { 1757 QualType T = VD->getType(); 1758 if (const auto *RT = T->getAs<ReferenceType>()) { 1759 if (ForAlignof) 1760 T = RT->getPointeeType(); 1761 else 1762 T = getPointerType(RT->getPointeeType()); 1763 } 1764 QualType BaseT = getBaseElementType(T); 1765 if (T->isFunctionType()) 1766 Align = getTypeInfoImpl(T.getTypePtr()).Align; 1767 else if (!BaseT->isIncompleteType()) { 1768 // Adjust alignments of declarations with array type by the 1769 // large-array alignment on the target. 
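      // (If the target reports a nonzero getLargeArrayMinWidth(), variable
      // length arrays and constant arrays at least that many bits wide are
      // raised to getLargeArrayAlign() below; this adjustment is skipped when
      // computing alignof.)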
1770       if (const ArrayType *arrayType = getAsArrayType(T)) {
1771         unsigned MinWidth = Target->getLargeArrayMinWidth();
1772         if (!ForAlignof && MinWidth) {
1773           if (isa<VariableArrayType>(arrayType))
1774             Align = std::max(Align, Target->getLargeArrayAlign());
1775           else if (isa<ConstantArrayType>(arrayType) &&
1776                    MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1777             Align = std::max(Align, Target->getLargeArrayAlign());
1778         }
1779       }
1780       Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1781       if (BaseT.getQualifiers().hasUnaligned())
1782         Align = Target->getCharWidth();
1783       if (const auto *VD = dyn_cast<VarDecl>(D)) {
1784         if (VD->hasGlobalStorage() && !ForAlignof) {
1785           uint64_t TypeSize = getTypeSize(T.getTypePtr());
1786           Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
1787         }
1788       }
1789     }
1790
1791     // Fields can be subject to extra alignment constraints, like if
1792     // the field is packed, the struct is packed, or the struct has a
1793     // max-field-alignment constraint (#pragma pack). So calculate
1794     // the actual alignment of the field within the struct, and then
1795     // (as we're expected to) constrain that by the alignment of the type.
1796     if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1797       const RecordDecl *Parent = Field->getParent();
1798       // We can only produce a sensible answer if the record is valid.
1799       if (!Parent->isInvalidDecl()) {
1800         const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1801
1802         // Start with the record's overall alignment.
1803         unsigned FieldAlign = toBits(Layout.getAlignment());
1804
1805         // Use the GCD of that and the offset within the record.
1806         uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1807         if (Offset > 0) {
1808           // Alignment is always a power of 2, so the GCD will be a power of 2,
1809           // which means we get to do this crazy thing instead of Euclid's.
1810           uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1811           if (LowBitOfOffset < FieldAlign)
1812             FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1813         }
1814
1815         Align = std::min(Align, FieldAlign);
1816       }
1817     }
1818   }
1819
1820   // Some targets place a hard limit on the maximum alignment that can be
1821   // requested with the aligned attribute for static variables.
1822   const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
1823   const auto *VD = dyn_cast<VarDecl>(D);
1824   if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
1825     Align = std::min(Align, MaxAlignedAttr);
1826
1827   return toCharUnitsFromBits(Align);
1828 }
1829
1830 CharUnits ASTContext::getExnObjectAlignment() const {
1831   return toCharUnitsFromBits(Target->getExnObjectAlignment());
1832 }
1833
1834 // getTypeInfoDataSizeInChars - Return the size of a type, in
1835 // chars. If the type is a record, its data size is returned. This is
1836 // the size of the memcpy that's performed when assigning this type
1837 // using a trivial copy/move assignment operator.
1838 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
1839   TypeInfoChars Info = getTypeInfoInChars(T);
1840
1841   // In C++, objects can sometimes be allocated into the tail padding
1842   // of a base-class subobject. We decide whether that's possible
1843   // during class layout, so here we can just trust the layout results.
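  // (The "data size" used below is the object's size without tail padding; a
  // C++ derived class may place its own members in a base subobject's tail
  // padding, so only the data size is safe to copy here.)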
1844 if (getLangOpts().CPlusPlus) { 1845 if (const auto *RT = T->getAs<RecordType>()) { 1846 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1847 Info.Width = layout.getDataSize(); 1848 } 1849 } 1850 1851 return Info; 1852 } 1853 1854 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1855 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1856 TypeInfoChars 1857 static getConstantArrayInfoInChars(const ASTContext &Context, 1858 const ConstantArrayType *CAT) { 1859 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1860 uint64_t Size = CAT->getSize().getZExtValue(); 1861 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1862 (uint64_t)(-1)/Size) && 1863 "Overflow in array type char size evaluation"); 1864 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1865 unsigned Align = EltInfo.Align.getQuantity(); 1866 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1867 Context.getTargetInfo().getPointerWidth(0) == 64) 1868 Width = llvm::alignTo(Width, Align); 1869 return TypeInfoChars(CharUnits::fromQuantity(Width), 1870 CharUnits::fromQuantity(Align), 1871 EltInfo.AlignRequirement); 1872 } 1873 1874 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1875 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1876 return getConstantArrayInfoInChars(*this, CAT); 1877 TypeInfo Info = getTypeInfo(T); 1878 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1879 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1880 } 1881 1882 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1883 return getTypeInfoInChars(T.getTypePtr()); 1884 } 1885 1886 bool ASTContext::isAlignmentRequired(const Type *T) const { 1887 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1888 } 1889 1890 bool ASTContext::isAlignmentRequired(QualType T) const { 1891 return isAlignmentRequired(T.getTypePtr()); 1892 } 1893 1894 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1895 bool NeedsPreferredAlignment) const { 1896 // An alignment on a typedef overrides anything else. 1897 if (const auto *TT = T->getAs<TypedefType>()) 1898 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1899 return Align; 1900 1901 // If we have an (array of) complete type, we're done. 1902 T = getBaseElementType(T); 1903 if (!T->isIncompleteType()) 1904 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1905 1906 // If we had an array type, its element type might be a typedef 1907 // type with an alignment attribute. 1908 if (const auto *TT = T->getAs<TypedefType>()) 1909 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1910 return Align; 1911 1912 // Otherwise, see if the declaration of the type had an attribute. 1913 if (const auto *TT = T->getAs<TagType>()) 1914 return TT->getDecl()->getMaxAlignment(); 1915 1916 return 0; 1917 } 1918 1919 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1920 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1921 if (I != MemoizedTypeInfo.end()) 1922 return I->second; 1923 1924 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1925 TypeInfo TI = getTypeInfoImpl(T); 1926 MemoizedTypeInfo[T] = TI; 1927 return TI; 1928 } 1929 1930 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1931 /// method does not work on incomplete types. 
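/// For wide constant arrays, prefer getTypeInfoInChars above, which works in
/// CharUnits and therefore avoids overflowing the 64-bit bit count.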
1932 /// 1933 /// FIXME: Pointers into different addr spaces could have different sizes and 1934 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1935 /// should take a QualType, &c. 1936 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1937 uint64_t Width = 0; 1938 unsigned Align = 8; 1939 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1940 unsigned AS = 0; 1941 switch (T->getTypeClass()) { 1942 #define TYPE(Class, Base) 1943 #define ABSTRACT_TYPE(Class, Base) 1944 #define NON_CANONICAL_TYPE(Class, Base) 1945 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1946 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1947 case Type::Class: \ 1948 assert(!T->isDependentType() && "should not see dependent types here"); \ 1949 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1950 #include "clang/AST/TypeNodes.inc" 1951 llvm_unreachable("Should not see dependent types"); 1952 1953 case Type::FunctionNoProto: 1954 case Type::FunctionProto: 1955 // GCC extension: alignof(function) = 32 bits 1956 Width = 0; 1957 Align = 32; 1958 break; 1959 1960 case Type::IncompleteArray: 1961 case Type::VariableArray: 1962 case Type::ConstantArray: { 1963 // Model non-constant sized arrays as size zero, but track the alignment. 1964 uint64_t Size = 0; 1965 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1966 Size = CAT->getSize().getZExtValue(); 1967 1968 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1969 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1970 "Overflow in array type bit size evaluation"); 1971 Width = EltInfo.Width * Size; 1972 Align = EltInfo.Align; 1973 AlignRequirement = EltInfo.AlignRequirement; 1974 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1975 getTargetInfo().getPointerWidth(0) == 64) 1976 Width = llvm::alignTo(Width, Align); 1977 break; 1978 } 1979 1980 case Type::ExtVector: 1981 case Type::Vector: { 1982 const auto *VT = cast<VectorType>(T); 1983 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1984 Width = EltInfo.Width * VT->getNumElements(); 1985 Align = Width; 1986 // If the alignment is not a power of 2, round up to the next power of 2. 1987 // This happens for non-power-of-2 length vectors. 1988 if (Align & (Align-1)) { 1989 Align = llvm::NextPowerOf2(Align); 1990 Width = llvm::alignTo(Width, Align); 1991 } 1992 // Adjust the alignment based on the target max. 1993 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1994 if (TargetVectorAlign && TargetVectorAlign < Align) 1995 Align = TargetVectorAlign; 1996 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 1997 // Adjust the alignment for fixed-length SVE vectors. This is important 1998 // for non-power-of-2 vector lengths. 1999 Align = 128; 2000 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 2001 // Adjust the alignment for fixed-length SVE predicates. 2002 Align = 16; 2003 break; 2004 } 2005 2006 case Type::ConstantMatrix: { 2007 const auto *MT = cast<ConstantMatrixType>(T); 2008 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 2009 // The internal layout of a matrix value is implementation defined. 2010 // Initially be ABI compatible with arrays with respect to alignment and 2011 // size. 
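    // (That is, the matrix is sized as a flat array of getNumRows() *
    // getNumColumns() elements and keeps the element type's alignment.)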
2012 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 2013 Align = ElementInfo.Align; 2014 break; 2015 } 2016 2017 case Type::Builtin: 2018 switch (cast<BuiltinType>(T)->getKind()) { 2019 default: llvm_unreachable("Unknown builtin type!"); 2020 case BuiltinType::Void: 2021 // GCC extension: alignof(void) = 8 bits. 2022 Width = 0; 2023 Align = 8; 2024 break; 2025 case BuiltinType::Bool: 2026 Width = Target->getBoolWidth(); 2027 Align = Target->getBoolAlign(); 2028 break; 2029 case BuiltinType::Char_S: 2030 case BuiltinType::Char_U: 2031 case BuiltinType::UChar: 2032 case BuiltinType::SChar: 2033 case BuiltinType::Char8: 2034 Width = Target->getCharWidth(); 2035 Align = Target->getCharAlign(); 2036 break; 2037 case BuiltinType::WChar_S: 2038 case BuiltinType::WChar_U: 2039 Width = Target->getWCharWidth(); 2040 Align = Target->getWCharAlign(); 2041 break; 2042 case BuiltinType::Char16: 2043 Width = Target->getChar16Width(); 2044 Align = Target->getChar16Align(); 2045 break; 2046 case BuiltinType::Char32: 2047 Width = Target->getChar32Width(); 2048 Align = Target->getChar32Align(); 2049 break; 2050 case BuiltinType::UShort: 2051 case BuiltinType::Short: 2052 Width = Target->getShortWidth(); 2053 Align = Target->getShortAlign(); 2054 break; 2055 case BuiltinType::UInt: 2056 case BuiltinType::Int: 2057 Width = Target->getIntWidth(); 2058 Align = Target->getIntAlign(); 2059 break; 2060 case BuiltinType::ULong: 2061 case BuiltinType::Long: 2062 Width = Target->getLongWidth(); 2063 Align = Target->getLongAlign(); 2064 break; 2065 case BuiltinType::ULongLong: 2066 case BuiltinType::LongLong: 2067 Width = Target->getLongLongWidth(); 2068 Align = Target->getLongLongAlign(); 2069 break; 2070 case BuiltinType::Int128: 2071 case BuiltinType::UInt128: 2072 Width = 128; 2073 Align = 128; // int128_t is 128-bit aligned on all targets. 
2074 break; 2075 case BuiltinType::ShortAccum: 2076 case BuiltinType::UShortAccum: 2077 case BuiltinType::SatShortAccum: 2078 case BuiltinType::SatUShortAccum: 2079 Width = Target->getShortAccumWidth(); 2080 Align = Target->getShortAccumAlign(); 2081 break; 2082 case BuiltinType::Accum: 2083 case BuiltinType::UAccum: 2084 case BuiltinType::SatAccum: 2085 case BuiltinType::SatUAccum: 2086 Width = Target->getAccumWidth(); 2087 Align = Target->getAccumAlign(); 2088 break; 2089 case BuiltinType::LongAccum: 2090 case BuiltinType::ULongAccum: 2091 case BuiltinType::SatLongAccum: 2092 case BuiltinType::SatULongAccum: 2093 Width = Target->getLongAccumWidth(); 2094 Align = Target->getLongAccumAlign(); 2095 break; 2096 case BuiltinType::ShortFract: 2097 case BuiltinType::UShortFract: 2098 case BuiltinType::SatShortFract: 2099 case BuiltinType::SatUShortFract: 2100 Width = Target->getShortFractWidth(); 2101 Align = Target->getShortFractAlign(); 2102 break; 2103 case BuiltinType::Fract: 2104 case BuiltinType::UFract: 2105 case BuiltinType::SatFract: 2106 case BuiltinType::SatUFract: 2107 Width = Target->getFractWidth(); 2108 Align = Target->getFractAlign(); 2109 break; 2110 case BuiltinType::LongFract: 2111 case BuiltinType::ULongFract: 2112 case BuiltinType::SatLongFract: 2113 case BuiltinType::SatULongFract: 2114 Width = Target->getLongFractWidth(); 2115 Align = Target->getLongFractAlign(); 2116 break; 2117 case BuiltinType::BFloat16: 2118 Width = Target->getBFloat16Width(); 2119 Align = Target->getBFloat16Align(); 2120 break; 2121 case BuiltinType::Float16: 2122 case BuiltinType::Half: 2123 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2124 !getLangOpts().OpenMPIsDevice) { 2125 Width = Target->getHalfWidth(); 2126 Align = Target->getHalfAlign(); 2127 } else { 2128 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2129 "Expected OpenMP device compilation."); 2130 Width = AuxTarget->getHalfWidth(); 2131 Align = AuxTarget->getHalfAlign(); 2132 } 2133 break; 2134 case BuiltinType::Float: 2135 Width = Target->getFloatWidth(); 2136 Align = Target->getFloatAlign(); 2137 break; 2138 case BuiltinType::Double: 2139 Width = Target->getDoubleWidth(); 2140 Align = Target->getDoubleAlign(); 2141 break; 2142 case BuiltinType::Ibm128: 2143 Width = Target->getIbm128Width(); 2144 Align = Target->getIbm128Align(); 2145 break; 2146 case BuiltinType::LongDouble: 2147 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2148 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2149 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2150 Width = AuxTarget->getLongDoubleWidth(); 2151 Align = AuxTarget->getLongDoubleAlign(); 2152 } else { 2153 Width = Target->getLongDoubleWidth(); 2154 Align = Target->getLongDoubleAlign(); 2155 } 2156 break; 2157 case BuiltinType::Float128: 2158 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2159 !getLangOpts().OpenMPIsDevice) { 2160 Width = Target->getFloat128Width(); 2161 Align = Target->getFloat128Align(); 2162 } else { 2163 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2164 "Expected OpenMP device compilation."); 2165 Width = AuxTarget->getFloat128Width(); 2166 Align = AuxTarget->getFloat128Align(); 2167 } 2168 break; 2169 case BuiltinType::NullPtr: 2170 Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) 2171 Align = Target->getPointerAlign(0); // == sizeof(void*) 2172 break; 2173 case BuiltinType::ObjCId: 2174 case BuiltinType::ObjCClass: 2175 case BuiltinType::ObjCSel: 2176 
Width = Target->getPointerWidth(0); 2177 Align = Target->getPointerAlign(0); 2178 break; 2179 case BuiltinType::OCLSampler: 2180 case BuiltinType::OCLEvent: 2181 case BuiltinType::OCLClkEvent: 2182 case BuiltinType::OCLQueue: 2183 case BuiltinType::OCLReserveID: 2184 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2185 case BuiltinType::Id: 2186 #include "clang/Basic/OpenCLImageTypes.def" 2187 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2188 case BuiltinType::Id: 2189 #include "clang/Basic/OpenCLExtensionTypes.def" 2190 AS = getTargetAddressSpace( 2191 Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); 2192 Width = Target->getPointerWidth(AS); 2193 Align = Target->getPointerAlign(AS); 2194 break; 2195 // The SVE types are effectively target-specific. The length of an 2196 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2197 // of 128 bits. There is one predicate bit for each vector byte, so the 2198 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2199 // 2200 // Because the length is only known at runtime, we use a dummy value 2201 // of 0 for the static length. The alignment values are those defined 2202 // by the Procedure Call Standard for the Arm Architecture. 2203 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2204 IsSigned, IsFP, IsBF) \ 2205 case BuiltinType::Id: \ 2206 Width = 0; \ 2207 Align = 128; \ 2208 break; 2209 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2210 case BuiltinType::Id: \ 2211 Width = 0; \ 2212 Align = 16; \ 2213 break; 2214 #include "clang/Basic/AArch64SVEACLETypes.def" 2215 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2216 case BuiltinType::Id: \ 2217 Width = Size; \ 2218 Align = Size; \ 2219 break; 2220 #include "clang/Basic/PPCTypes.def" 2221 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2222 IsFP) \ 2223 case BuiltinType::Id: \ 2224 Width = 0; \ 2225 Align = ElBits; \ 2226 break; 2227 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2228 case BuiltinType::Id: \ 2229 Width = 0; \ 2230 Align = 8; \ 2231 break; 2232 #include "clang/Basic/RISCVVTypes.def" 2233 } 2234 break; 2235 case Type::ObjCObjectPointer: 2236 Width = Target->getPointerWidth(0); 2237 Align = Target->getPointerAlign(0); 2238 break; 2239 case Type::BlockPointer: 2240 AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType()); 2241 Width = Target->getPointerWidth(AS); 2242 Align = Target->getPointerAlign(AS); 2243 break; 2244 case Type::LValueReference: 2245 case Type::RValueReference: 2246 // alignof and sizeof should never enter this code path here, so we go 2247 // the pointer route. 2248 AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType()); 2249 Width = Target->getPointerWidth(AS); 2250 Align = Target->getPointerAlign(AS); 2251 break; 2252 case Type::Pointer: 2253 AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); 2254 Width = Target->getPointerWidth(AS); 2255 Align = Target->getPointerAlign(AS); 2256 break; 2257 case Type::MemberPointer: { 2258 const auto *MPT = cast<MemberPointerType>(T); 2259 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2260 Width = MPI.Width; 2261 Align = MPI.Align; 2262 break; 2263 } 2264 case Type::Complex: { 2265 // Complex types have the same alignment as their elements, but twice the 2266 // size. 
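    // (e.g. _Complex double is laid out as two consecutive doubles: the real
    // part followed by the imaginary part.)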
2267 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2268 Width = EltInfo.Width * 2; 2269 Align = EltInfo.Align; 2270 break; 2271 } 2272 case Type::ObjCObject: 2273 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2274 case Type::Adjusted: 2275 case Type::Decayed: 2276 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2277 case Type::ObjCInterface: { 2278 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2279 if (ObjCI->getDecl()->isInvalidDecl()) { 2280 Width = 8; 2281 Align = 8; 2282 break; 2283 } 2284 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2285 Width = toBits(Layout.getSize()); 2286 Align = toBits(Layout.getAlignment()); 2287 break; 2288 } 2289 case Type::BitInt: { 2290 const auto *EIT = cast<BitIntType>(T); 2291 Align = 2292 std::min(static_cast<unsigned>(std::max( 2293 getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), 2294 Target->getLongLongAlign()); 2295 Width = llvm::alignTo(EIT->getNumBits(), Align); 2296 break; 2297 } 2298 case Type::Record: 2299 case Type::Enum: { 2300 const auto *TT = cast<TagType>(T); 2301 2302 if (TT->getDecl()->isInvalidDecl()) { 2303 Width = 8; 2304 Align = 8; 2305 break; 2306 } 2307 2308 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2309 const EnumDecl *ED = ET->getDecl(); 2310 TypeInfo Info = 2311 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2312 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2313 Info.Align = AttrAlign; 2314 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2315 } 2316 return Info; 2317 } 2318 2319 const auto *RT = cast<RecordType>(TT); 2320 const RecordDecl *RD = RT->getDecl(); 2321 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2322 Width = toBits(Layout.getSize()); 2323 Align = toBits(Layout.getAlignment()); 2324 AlignRequirement = RD->hasAttr<AlignedAttr>() 2325 ? AlignRequirementKind::RequiredByRecord 2326 : AlignRequirementKind::None; 2327 break; 2328 } 2329 2330 case Type::SubstTemplateTypeParm: 2331 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2332 getReplacementType().getTypePtr()); 2333 2334 case Type::Auto: 2335 case Type::DeducedTemplateSpecialization: { 2336 const auto *A = cast<DeducedType>(T); 2337 assert(!A->getDeducedType().isNull() && 2338 "cannot request the size of an undeduced or dependent auto type"); 2339 return getTypeInfo(A->getDeducedType().getTypePtr()); 2340 } 2341 2342 case Type::Paren: 2343 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2344 2345 case Type::MacroQualified: 2346 return getTypeInfo( 2347 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2348 2349 case Type::ObjCTypeParam: 2350 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2351 2352 case Type::Using: 2353 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2354 2355 case Type::Typedef: { 2356 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); 2357 TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); 2358 // If the typedef has an aligned attribute on it, it overrides any computed 2359 // alignment we have. This violates the GCC documentation (which says that 2360 // attribute(aligned) can only round up) but matches its implementation. 
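    // (e.g. given 'typedef long long LL __attribute__((aligned(4)));', the
    // alignment computed here for LL is 32 bits, even though that is below
    // long long's natural alignment on most 64-bit targets.)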
2361     if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
2362       Align = AttrAlign;
2363       AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2364     } else {
2365       Align = Info.Align;
2366       AlignRequirement = Info.AlignRequirement;
2367     }
2368     Width = Info.Width;
2369     break;
2370   }
2371
2372   case Type::Elaborated:
2373     return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2374
2375   case Type::Attributed:
2376     return getTypeInfo(
2377         cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2378
2379   case Type::Atomic: {
2380     // Start with the base type information.
2381     TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2382     Width = Info.Width;
2383     Align = Info.Align;
2384
2385     if (!Width) {
2386       // An otherwise zero-sized type should still generate an
2387       // atomic operation.
2388       Width = Target->getCharWidth();
2389       assert(Align);
2390     } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2391       // If the size of the type doesn't exceed the platform's max
2392       // atomic promotion width, make the size and alignment more
2393       // favorable to atomic operations:
2394
2395       // Round the size up to a power of 2.
2396       if (!llvm::isPowerOf2_64(Width))
2397         Width = llvm::NextPowerOf2(Width);
2398
2399       // Set the alignment equal to the size.
2400       Align = static_cast<unsigned>(Width);
2401     }
2402   }
2403   break;
2404
2405   case Type::Pipe:
2406     Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
2407     Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
2408     break;
2409   }
2410
2411   assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2412   return TypeInfo(Width, Align, AlignRequirement);
2413 }
2414
2415 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2416   UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2417   if (I != MemoizedUnadjustedAlign.end())
2418     return I->second;
2419
2420   unsigned UnadjustedAlign;
2421   if (const auto *RT = T->getAs<RecordType>()) {
2422     const RecordDecl *RD = RT->getDecl();
2423     const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2424     UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2425   } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2426     const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2427     UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2428   } else {
2429     UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2430   }
2431
2432   MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2433   return UnadjustedAlign;
2434 }
2435
2436 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2437   unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
2438   return SimdAlign;
2439 }
2440
2441 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2442 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2443   return CharUnits::fromQuantity(BitSize / getCharWidth());
2444 }
2445
2446 /// toBits - Convert a size in characters to a size in bits.
2447 int64_t ASTContext::toBits(CharUnits CharSize) const {
2448   return CharSize.getQuantity() * getCharWidth();
2449 }
2450
2451 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2452 /// This method does not work on incomplete types.
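/// For example (illustrative sketch; 'Ctx' stands for an ASTContext and the
/// exact values depend on the target, shown here for a typical 64-bit one):
/// \code
///   Ctx.getTypeSizeInChars(Ctx.IntTy).getQuantity();     // commonly 4
///   Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity(); // commonly 8
/// \endcode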
2453 CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2454   return getTypeInfoInChars(T).Width;
2455 }
2456 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2457   return getTypeInfoInChars(T).Width;
2458 }
2459
2460 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2461 /// characters. This method does not work on incomplete types.
2462 CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2463   return toCharUnitsFromBits(getTypeAlign(T));
2464 }
2465 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2466   return toCharUnitsFromBits(getTypeAlign(T));
2467 }
2468
2469 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2470 /// type, in characters, before alignment adjustments. This method does
2471 /// not work on incomplete types.
2472 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2473   return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2474 }
2475 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2476   return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
2477 }
2478
2479 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
2480 /// type for the current target, in bits. This can differ from the ABI
2481 /// alignment when overaligning a data type is beneficial for performance or
2482 /// for preserving backwards compatibility. (Note: despite the name, the
2483 /// preferred alignment is ABI-impacting, and not an optimization.)
2484 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
2485   TypeInfo TI = getTypeInfo(T);
2486   unsigned ABIAlign = TI.Align;
2487
2488   T = T->getBaseElementTypeUnsafe();
2489
2490   // The preferred alignment of member pointers is that of a pointer.
2491   if (T->isMemberPointerType())
2492     return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
2493
2494   if (!Target->allowsLargerPreferedTypeAlignment())
2495     return ABIAlign;
2496
2497   if (const auto *RT = T->getAs<RecordType>()) {
2498     const RecordDecl *RD = RT->getDecl();
2499
2500     // When used as part of a typedef, or together with a 'packed' attribute,
2501     // the 'aligned' attribute can be used to decrease alignment. Note that the
2502     // 'packed' case is already taken into consideration when computing the
2503     // alignment; we only need to handle the typedef case here.
2504     if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
2505         RD->isInvalidDecl())
2506       return ABIAlign;
2507
2508     unsigned PreferredAlign = static_cast<unsigned>(
2509         toBits(getASTRecordLayout(RD).PreferredAlignment));
2510     assert(PreferredAlign >= ABIAlign &&
2511            "PreferredAlign should be at least as large as ABIAlign.");
2512     return PreferredAlign;
2513   }
2514
2515   // Double (and, for targets supporting AIX `power` alignment, long double) and
2516   // long long should be naturally aligned (despite requiring less alignment) if
2517   // possible.
2518   if (const auto *CT = T->getAs<ComplexType>())
2519     T = CT->getElementType().getTypePtr();
2520   if (const auto *ET = T->getAs<EnumType>())
2521     T = ET->getDecl()->getIntegerType().getTypePtr();
2522   if (T->isSpecificBuiltinType(BuiltinType::Double) ||
2523       T->isSpecificBuiltinType(BuiltinType::LongLong) ||
2524       T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2525       (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
2526        Target->defaultsToAIXPowerAlignment()))
2527     // Don't increase the alignment if an alignment attribute was specified on a
2528     // typedef declaration.
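    // (isAlignRequired() below is true whenever an explicit alignment
    // requirement was recorded while computing TI, e.g. RequiredByTypedef.)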
2529 if (!TI.isAlignRequired()) 2530 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2531 2532 return ABIAlign; 2533 } 2534 2535 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2536 /// for __attribute__((aligned)) on this target, to be used if no alignment 2537 /// value is specified. 2538 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2539 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2540 } 2541 2542 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2543 /// to a global variable of the specified type. 2544 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2545 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2546 return std::max(getPreferredTypeAlign(T), 2547 getTargetInfo().getMinGlobalAlign(TypeSize)); 2548 } 2549 2550 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2551 /// should be given to a global variable of the specified type. 2552 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2553 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2554 } 2555 2556 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2557 CharUnits Offset = CharUnits::Zero(); 2558 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2559 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2560 Offset += Layout->getBaseClassOffset(Base); 2561 Layout = &getASTRecordLayout(Base); 2562 } 2563 return Offset; 2564 } 2565 2566 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2567 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2568 CharUnits ThisAdjustment = CharUnits::Zero(); 2569 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2570 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2571 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2572 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2573 const CXXRecordDecl *Base = RD; 2574 const CXXRecordDecl *Derived = Path[I]; 2575 if (DerivedMember) 2576 std::swap(Base, Derived); 2577 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2578 RD = Path[I]; 2579 } 2580 if (DerivedMember) 2581 ThisAdjustment = -ThisAdjustment; 2582 return ThisAdjustment; 2583 } 2584 2585 /// DeepCollectObjCIvars - 2586 /// This routine first collects all declared, but not synthesized, ivars in 2587 /// super class and then collects all ivars, including those synthesized for 2588 /// current class. This routine is used for implementation of current class 2589 /// when all ivars, declared and synthesized are known. 2590 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2591 bool leafClass, 2592 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2593 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2594 DeepCollectObjCIvars(SuperClass, false, Ivars); 2595 if (!leafClass) { 2596 for (const auto *I : OI->ivars()) 2597 Ivars.push_back(I); 2598 } else { 2599 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2600 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2601 Iv= Iv->getNextIvar()) 2602 Ivars.push_back(Iv); 2603 } 2604 } 2605 2606 /// CollectInheritedProtocols - Collect all protocols in current class and 2607 /// those inherited by it. 
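/// Protocols are accumulated into the given set, so a protocol reached through
/// several paths is only recorded once. Illustrative usage sketch ('Ctx' and
/// 'IFace' are placeholders):
/// \code
///   llvm::SmallPtrSet<ObjCProtocolDecl *, 8> Protocols;
///   Ctx.CollectInheritedProtocols(IFace, Protocols);
/// \endcode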
2608 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2609 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2610 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2611 // We can use protocol_iterator here instead of 2612 // all_referenced_protocol_iterator since we are walking all categories. 2613 for (auto *Proto : OI->all_referenced_protocols()) { 2614 CollectInheritedProtocols(Proto, Protocols); 2615 } 2616 2617 // Categories of this Interface. 2618 for (const auto *Cat : OI->visible_categories()) 2619 CollectInheritedProtocols(Cat, Protocols); 2620 2621 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2622 while (SD) { 2623 CollectInheritedProtocols(SD, Protocols); 2624 SD = SD->getSuperClass(); 2625 } 2626 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2627 for (auto *Proto : OC->protocols()) { 2628 CollectInheritedProtocols(Proto, Protocols); 2629 } 2630 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2631 // Insert the protocol. 2632 if (!Protocols.insert( 2633 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2634 return; 2635 2636 for (auto *Proto : OP->protocols()) 2637 CollectInheritedProtocols(Proto, Protocols); 2638 } 2639 } 2640 2641 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2642 const RecordDecl *RD) { 2643 assert(RD->isUnion() && "Must be union type"); 2644 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2645 2646 for (const auto *Field : RD->fields()) { 2647 if (!Context.hasUniqueObjectRepresentations(Field->getType())) 2648 return false; 2649 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2650 if (FieldSize != UnionSize) 2651 return false; 2652 } 2653 return !RD->field_empty(); 2654 } 2655 2656 static int64_t getSubobjectOffset(const FieldDecl *Field, 2657 const ASTContext &Context, 2658 const clang::ASTRecordLayout & /*Layout*/) { 2659 return Context.getFieldOffset(Field); 2660 } 2661 2662 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2663 const ASTContext &Context, 2664 const clang::ASTRecordLayout &Layout) { 2665 return Context.toBits(Layout.getBaseClassOffset(RD)); 2666 } 2667 2668 static llvm::Optional<int64_t> 2669 structHasUniqueObjectRepresentations(const ASTContext &Context, 2670 const RecordDecl *RD); 2671 2672 static llvm::Optional<int64_t> 2673 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) { 2674 if (Field->getType()->isRecordType()) { 2675 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2676 if (!RD->isUnion()) 2677 return structHasUniqueObjectRepresentations(Context, RD); 2678 } 2679 if (!Field->getType()->isReferenceType() && 2680 !Context.hasUniqueObjectRepresentations(Field->getType())) 2681 return llvm::None; 2682 2683 int64_t FieldSizeInBits = 2684 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2685 if (Field->isBitField()) { 2686 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2687 if (BitfieldSize > FieldSizeInBits) 2688 return llvm::None; 2689 FieldSizeInBits = BitfieldSize; 2690 } 2691 return FieldSizeInBits; 2692 } 2693 2694 static llvm::Optional<int64_t> 2695 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) { 2696 return structHasUniqueObjectRepresentations(Context, RD); 2697 } 2698 2699 template <typename RangeT> 2700 static llvm::Optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2701 const RangeT &Subobjects, int64_t CurOffsetInBits, 2702 const ASTContext &Context, const 
clang::ASTRecordLayout &Layout) { 2703 for (const auto *Subobject : Subobjects) { 2704 llvm::Optional<int64_t> SizeInBits = 2705 getSubobjectSizeInBits(Subobject, Context); 2706 if (!SizeInBits) 2707 return llvm::None; 2708 if (*SizeInBits != 0) { 2709 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2710 if (Offset != CurOffsetInBits) 2711 return llvm::None; 2712 CurOffsetInBits += *SizeInBits; 2713 } 2714 } 2715 return CurOffsetInBits; 2716 } 2717 2718 static llvm::Optional<int64_t> 2719 structHasUniqueObjectRepresentations(const ASTContext &Context, 2720 const RecordDecl *RD) { 2721 assert(!RD->isUnion() && "Must be struct/class type"); 2722 const auto &Layout = Context.getASTRecordLayout(RD); 2723 2724 int64_t CurOffsetInBits = 0; 2725 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2726 if (ClassDecl->isDynamicClass()) 2727 return llvm::None; 2728 2729 SmallVector<CXXRecordDecl *, 4> Bases; 2730 for (const auto &Base : ClassDecl->bases()) { 2731 // Empty types can be inherited from, and non-empty types can potentially 2732 // have tail padding, so just make sure there isn't an error. 2733 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2734 } 2735 2736 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2737 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2738 }); 2739 2740 llvm::Optional<int64_t> OffsetAfterBases = 2741 structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits, 2742 Context, Layout); 2743 if (!OffsetAfterBases) 2744 return llvm::None; 2745 CurOffsetInBits = *OffsetAfterBases; 2746 } 2747 2748 llvm::Optional<int64_t> OffsetAfterFields = 2749 structSubobjectsHaveUniqueObjectRepresentations( 2750 RD->fields(), CurOffsetInBits, Context, Layout); 2751 if (!OffsetAfterFields) 2752 return llvm::None; 2753 CurOffsetInBits = *OffsetAfterFields; 2754 2755 return CurOffsetInBits; 2756 } 2757 2758 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { 2759 // C++17 [meta.unary.prop]: 2760 // The predicate condition for a template specialization 2761 // has_unique_object_representations<T> shall be 2762 // satisfied if and only if: 2763 // (9.1) - T is trivially copyable, and 2764 // (9.2) - any two objects of type T with the same value have the same 2765 // object representation, where two objects 2766 // of array or non-union class type are considered to have the same value 2767 // if their respective sequences of 2768 // direct subobjects have the same values, and two objects of union type 2769 // are considered to have the same 2770 // value if they have the same active member and the corresponding members 2771 // have the same value. 2772 // The set of scalar types for which this condition holds is 2773 // implementation-defined. [ Note: If a type has padding 2774 // bits, the condition does not hold; otherwise, the condition holds true 2775 // for unsigned integral types. -- end note ] 2776 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2777 2778 // Arrays are unique only if their element type is unique. 2779 if (Ty->isArrayType()) 2780 return hasUniqueObjectRepresentations(getBaseElementType(Ty)); 2781 2782 // (9.1) - T is trivially copyable... 2783 if (!Ty.isTriviallyCopyableType(*this)) 2784 return false; 2785 2786 // All integrals and enums are unique. 2787 if (Ty->isIntegralOrEnumerationType()) 2788 return true; 2789 2790 // All other pointers are unique. 
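  // (i.e. ordinary object and function pointers; member pointers, which can
  // contain padding bits on some ABIs, are handled separately below.)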
2791 if (Ty->isPointerType()) 2792 return true; 2793 2794 if (Ty->isMemberPointerType()) { 2795 const auto *MPT = Ty->getAs<MemberPointerType>(); 2796 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2797 } 2798 2799 if (Ty->isRecordType()) { 2800 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2801 2802 if (Record->isInvalidDecl()) 2803 return false; 2804 2805 if (Record->isUnion()) 2806 return unionHasUniqueObjectRepresentations(*this, Record); 2807 2808 Optional<int64_t> StructSize = 2809 structHasUniqueObjectRepresentations(*this, Record); 2810 2811 return StructSize && 2812 StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty)); 2813 } 2814 2815 // FIXME: More cases to handle here (list by rsmith): 2816 // vectors (careful about, eg, vector of 3 foo) 2817 // _Complex int and friends 2818 // _Atomic T 2819 // Obj-C block pointers 2820 // Obj-C object pointers 2821 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2822 // clk_event_t, queue_t, reserve_id_t) 2823 // There're also Obj-C class types and the Obj-C selector type, but I think it 2824 // makes sense for those to return false here. 2825 2826 return false; 2827 } 2828 2829 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2830 unsigned count = 0; 2831 // Count ivars declared in class extension. 2832 for (const auto *Ext : OI->known_extensions()) 2833 count += Ext->ivar_size(); 2834 2835 // Count ivar defined in this class's implementation. This 2836 // includes synthesized ivars. 2837 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2838 count += ImplDecl->ivar_size(); 2839 2840 return count; 2841 } 2842 2843 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2844 if (!E) 2845 return false; 2846 2847 // nullptr_t is always treated as null. 2848 if (E->getType()->isNullPtrType()) return true; 2849 2850 if (E->getType()->isAnyPointerType() && 2851 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2852 Expr::NPC_ValueDependentIsNull)) 2853 return true; 2854 2855 // Unfortunately, __null has type 'int'. 2856 if (isa<GNUNullExpr>(E)) return true; 2857 2858 return false; 2859 } 2860 2861 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2862 /// exists. 2863 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2864 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2865 I = ObjCImpls.find(D); 2866 if (I != ObjCImpls.end()) 2867 return cast<ObjCImplementationDecl>(I->second); 2868 return nullptr; 2869 } 2870 2871 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2872 /// exists. 2873 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2874 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2875 I = ObjCImpls.find(D); 2876 if (I != ObjCImpls.end()) 2877 return cast<ObjCCategoryImplDecl>(I->second); 2878 return nullptr; 2879 } 2880 2881 /// Set the implementation of ObjCInterfaceDecl. 2882 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2883 ObjCImplementationDecl *ImplD) { 2884 assert(IFaceD && ImplD && "Passed null params"); 2885 ObjCImpls[IFaceD] = ImplD; 2886 } 2887 2888 /// Set the implementation of ObjCCategoryDecl. 
2889 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2890 ObjCCategoryImplDecl *ImplD) { 2891 assert(CatD && ImplD && "Passed null params"); 2892 ObjCImpls[CatD] = ImplD; 2893 } 2894 2895 const ObjCMethodDecl * 2896 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2897 return ObjCMethodRedecls.lookup(MD); 2898 } 2899 2900 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2901 const ObjCMethodDecl *Redecl) { 2902 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2903 ObjCMethodRedecls[MD] = Redecl; 2904 } 2905 2906 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2907 const NamedDecl *ND) const { 2908 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2909 return ID; 2910 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2911 return CD->getClassInterface(); 2912 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2913 return IMD->getClassInterface(); 2914 2915 return nullptr; 2916 } 2917 2918 /// Get the copy initialization expression of VarDecl, or nullptr if 2919 /// none exists. 2920 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2921 assert(VD && "Passed null params"); 2922 assert(VD->hasAttr<BlocksAttr>() && 2923 "getBlockVarCopyInits - not __block var"); 2924 auto I = BlockVarCopyInits.find(VD); 2925 if (I != BlockVarCopyInits.end()) 2926 return I->second; 2927 return {nullptr, false}; 2928 } 2929 2930 /// Set the copy initialization expression of a block var decl. 2931 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2932 bool CanThrow) { 2933 assert(VD && CopyExpr && "Passed null params"); 2934 assert(VD->hasAttr<BlocksAttr>() && 2935 "setBlockVarCopyInits - not __block var"); 2936 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2937 } 2938 2939 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2940 unsigned DataSize) const { 2941 if (!DataSize) 2942 DataSize = TypeLoc::getFullDataSizeForType(T); 2943 else 2944 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2945 "incorrect data size provided to CreateTypeSourceInfo!"); 2946 2947 auto *TInfo = 2948 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2949 new (TInfo) TypeSourceInfo(T); 2950 return TInfo; 2951 } 2952 2953 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2954 SourceLocation L) const { 2955 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2956 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2957 return DI; 2958 } 2959 2960 const ASTRecordLayout & 2961 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2962 return getObjCLayout(D, nullptr); 2963 } 2964 2965 const ASTRecordLayout & 2966 ASTContext::getASTObjCImplementationLayout( 2967 const ObjCImplementationDecl *D) const { 2968 return getObjCLayout(D->getClassInterface(), D); 2969 } 2970 2971 //===----------------------------------------------------------------------===// 2972 // Type creation/memoization methods 2973 //===----------------------------------------------------------------------===// 2974 2975 QualType 2976 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2977 unsigned fastQuals = quals.getFastQualifiers(); 2978 quals.removeFastQualifiers(); 2979 2980 // Check if we've already instantiated this type. 
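  // (ExtQuals nodes are uniqued in a FoldingSet keyed on the base type and the
  // non-fast qualifiers; the fast qualifiers stripped above live in the
  // QualType's low bits and are re-applied to the result.)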
2981 llvm::FoldingSetNodeID ID; 2982 ExtQuals::Profile(ID, baseType, quals); 2983 void *insertPos = nullptr; 2984 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 2985 assert(eq->getQualifiers() == quals); 2986 return QualType(eq, fastQuals); 2987 } 2988 2989 // If the base type is not canonical, make the appropriate canonical type. 2990 QualType canon; 2991 if (!baseType->isCanonicalUnqualified()) { 2992 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 2993 canonSplit.Quals.addConsistentQualifiers(quals); 2994 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 2995 2996 // Re-find the insert position. 2997 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 2998 } 2999 3000 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 3001 ExtQualNodes.InsertNode(eq, insertPos); 3002 return QualType(eq, fastQuals); 3003 } 3004 3005 QualType ASTContext::getAddrSpaceQualType(QualType T, 3006 LangAS AddressSpace) const { 3007 QualType CanT = getCanonicalType(T); 3008 if (CanT.getAddressSpace() == AddressSpace) 3009 return T; 3010 3011 // If we are composing extended qualifiers together, merge together 3012 // into one ExtQuals node. 3013 QualifierCollector Quals; 3014 const Type *TypeNode = Quals.strip(T); 3015 3016 // If this type already has an address space specified, it cannot get 3017 // another one. 3018 assert(!Quals.hasAddressSpace() && 3019 "Type cannot be in multiple addr spaces!"); 3020 Quals.addAddressSpace(AddressSpace); 3021 3022 return getExtQualType(TypeNode, Quals); 3023 } 3024 3025 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3026 // If the type is not qualified with an address space, just return it 3027 // immediately. 3028 if (!T.hasAddressSpace()) 3029 return T; 3030 3031 // If we are composing extended qualifiers together, merge together 3032 // into one ExtQuals node. 3033 QualifierCollector Quals; 3034 const Type *TypeNode; 3035 3036 while (T.hasAddressSpace()) { 3037 TypeNode = Quals.strip(T); 3038 3039 // If the type no longer has an address space after stripping qualifiers, 3040 // jump out. 3041 if (!QualType(TypeNode, 0).hasAddressSpace()) 3042 break; 3043 3044 // There might be sugar in the way. Strip it and try again. 3045 T = T.getSingleStepDesugaredType(*this); 3046 } 3047 3048 Quals.removeAddressSpace(); 3049 3050 // Removal of the address space can mean there are no longer any 3051 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3052 // or required. 3053 if (Quals.hasNonFastQualifiers()) 3054 return getExtQualType(TypeNode, Quals); 3055 else 3056 return QualType(TypeNode, Quals.getFastQualifiers()); 3057 } 3058 3059 QualType ASTContext::getObjCGCQualType(QualType T, 3060 Qualifiers::GC GCAttr) const { 3061 QualType CanT = getCanonicalType(T); 3062 if (CanT.getObjCGCAttr() == GCAttr) 3063 return T; 3064 3065 if (const auto *ptr = T->getAs<PointerType>()) { 3066 QualType Pointee = ptr->getPointeeType(); 3067 if (Pointee->isAnyPointerType()) { 3068 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3069 return getPointerType(ResultType); 3070 } 3071 } 3072 3073 // If we are composing extended qualifiers together, merge together 3074 // into one ExtQuals node. 3075 QualifierCollector Quals; 3076 const Type *TypeNode = Quals.strip(T); 3077 3078 // If this type already has an ObjCGC specified, it cannot get 3079 // another one. 
3080 assert(!Quals.hasObjCGCAttr() && 3081 "Type cannot have multiple ObjCGCs!"); 3082 Quals.addObjCGCAttr(GCAttr); 3083 3084 return getExtQualType(TypeNode, Quals); 3085 } 3086 3087 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3088 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3089 QualType Pointee = Ptr->getPointeeType(); 3090 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3091 return getPointerType(removeAddrSpaceQualType(Pointee)); 3092 } 3093 } 3094 return T; 3095 } 3096 3097 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3098 FunctionType::ExtInfo Info) { 3099 if (T->getExtInfo() == Info) 3100 return T; 3101 3102 QualType Result; 3103 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3104 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3105 } else { 3106 const auto *FPT = cast<FunctionProtoType>(T); 3107 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3108 EPI.ExtInfo = Info; 3109 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3110 } 3111 3112 return cast<FunctionType>(Result.getTypePtr()); 3113 } 3114 3115 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3116 QualType ResultType) { 3117 FD = FD->getMostRecentDecl(); 3118 while (true) { 3119 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3120 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3121 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3122 if (FunctionDecl *Next = FD->getPreviousDecl()) 3123 FD = Next; 3124 else 3125 break; 3126 } 3127 if (ASTMutationListener *L = getASTMutationListener()) 3128 L->DeducedReturnType(FD, ResultType); 3129 } 3130 3131 /// Get a function type and produce the equivalent function type with the 3132 /// specified exception specification. Type sugar that can be present on a 3133 /// declaration of a function with an exception specification is permitted 3134 /// and preserved. Other type sugar (for instance, typedefs) is not. 3135 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3136 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) { 3137 // Might have some parens. 3138 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3139 return getParenType( 3140 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3141 3142 // Might be wrapped in a macro qualified type. 3143 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3144 return getMacroQualifiedType( 3145 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3146 MQT->getMacroIdentifier()); 3147 3148 // Might have a calling-convention attribute. 3149 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3150 return getAttributedType( 3151 AT->getAttrKind(), 3152 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3153 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3154 3155 // Anything else must be a function type. Rebuild it with the new exception 3156 // specification. 
3157 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3158 return getFunctionType( 3159 Proto->getReturnType(), Proto->getParamTypes(), 3160 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3161 } 3162 3163 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3164 QualType U) { 3165 return hasSameType(T, U) || 3166 (getLangOpts().CPlusPlus17 && 3167 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3168 getFunctionTypeWithExceptionSpec(U, EST_None))); 3169 } 3170 3171 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3172 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3173 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3174 SmallVector<QualType, 16> Args(Proto->param_types()); 3175 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3176 Args[i] = removePtrSizeAddrSpace(Args[i]); 3177 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3178 } 3179 3180 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3181 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3182 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3183 } 3184 3185 return T; 3186 } 3187 3188 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3189 return hasSameType(T, U) || 3190 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3191 getFunctionTypeWithoutPtrSizes(U)); 3192 } 3193 3194 void ASTContext::adjustExceptionSpec( 3195 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3196 bool AsWritten) { 3197 // Update the type. 3198 QualType Updated = 3199 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3200 FD->setType(Updated); 3201 3202 if (!AsWritten) 3203 return; 3204 3205 // Update the type in the type source information too. 3206 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3207 // If the type and the type-as-written differ, we may need to update 3208 // the type-as-written too. 3209 if (TSInfo->getType() != FD->getType()) 3210 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3211 3212 // FIXME: When we get proper type location information for exceptions, 3213 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3214 // up the TypeSourceInfo; 3215 assert(TypeLoc::getFullDataSizeForType(Updated) == 3216 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3217 "TypeLoc size mismatch from updating exception specification"); 3218 TSInfo->overrideType(Updated); 3219 } 3220 } 3221 3222 /// getComplexType - Return the uniqued reference to the type for a complex 3223 /// number with the specified element type. 3224 QualType ASTContext::getComplexType(QualType T) const { 3225 // Unique pointers, to guarantee there is only one pointer of a particular 3226 // structure. 3227 llvm::FoldingSetNodeID ID; 3228 ComplexType::Profile(ID, T); 3229 3230 void *InsertPos = nullptr; 3231 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3232 return QualType(CT, 0); 3233 3234 // If the pointee type isn't canonical, this won't be a canonical type either, 3235 // so fill in the canonical type field. 3236 QualType Canonical; 3237 if (!T.isCanonical()) { 3238 Canonical = getComplexType(getCanonicalType(T)); 3239 3240 // Get the new insert position for the node we care about. 
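    // (The recursive getComplexType call above may itself have inserted nodes
    // into ComplexTypes, which invalidates the insertion hint computed by the
    // earlier lookup, so it has to be refreshed before InsertNode is called
    // below.)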
3241 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3242 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3243 } 3244 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3245 Types.push_back(New); 3246 ComplexTypes.InsertNode(New, InsertPos); 3247 return QualType(New, 0); 3248 } 3249 3250 /// getPointerType - Return the uniqued reference to the type for a pointer to 3251 /// the specified type. 3252 QualType ASTContext::getPointerType(QualType T) const { 3253 // Unique pointers, to guarantee there is only one pointer of a particular 3254 // structure. 3255 llvm::FoldingSetNodeID ID; 3256 PointerType::Profile(ID, T); 3257 3258 void *InsertPos = nullptr; 3259 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3260 return QualType(PT, 0); 3261 3262 // If the pointee type isn't canonical, this won't be a canonical type either, 3263 // so fill in the canonical type field. 3264 QualType Canonical; 3265 if (!T.isCanonical()) { 3266 Canonical = getPointerType(getCanonicalType(T)); 3267 3268 // Get the new insert position for the node we care about. 3269 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3270 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3271 } 3272 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3273 Types.push_back(New); 3274 PointerTypes.InsertNode(New, InsertPos); 3275 return QualType(New, 0); 3276 } 3277 3278 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3279 llvm::FoldingSetNodeID ID; 3280 AdjustedType::Profile(ID, Orig, New); 3281 void *InsertPos = nullptr; 3282 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3283 if (AT) 3284 return QualType(AT, 0); 3285 3286 QualType Canonical = getCanonicalType(New); 3287 3288 // Get the new insert position for the node we care about. 3289 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3290 assert(!AT && "Shouldn't be in the map!"); 3291 3292 AT = new (*this, TypeAlignment) 3293 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3294 Types.push_back(AT); 3295 AdjustedTypes.InsertNode(AT, InsertPos); 3296 return QualType(AT, 0); 3297 } 3298 3299 QualType ASTContext::getDecayedType(QualType T) const { 3300 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3301 3302 QualType Decayed; 3303 3304 // C99 6.7.5.3p7: 3305 // A declaration of a parameter as "array of type" shall be 3306 // adjusted to "qualified pointer to type", where the type 3307 // qualifiers (if any) are those specified within the [ and ] of 3308 // the array type derivation. 3309 if (T->isArrayType()) 3310 Decayed = getArrayDecayedType(T); 3311 3312 // C99 6.7.5.3p8: 3313 // A declaration of a parameter as "function returning type" 3314 // shall be adjusted to "pointer to function returning type", as 3315 // in 6.3.2.1. 3316 if (T->isFunctionType()) 3317 Decayed = getPointerType(T); 3318 3319 llvm::FoldingSetNodeID ID; 3320 AdjustedType::Profile(ID, T, Decayed); 3321 void *InsertPos = nullptr; 3322 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3323 if (AT) 3324 return QualType(AT, 0); 3325 3326 QualType Canonical = getCanonicalType(Decayed); 3327 3328 // Get the new insert position for the node we care about. 
3329 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3330 assert(!AT && "Shouldn't be in the map!"); 3331 3332 AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); 3333 Types.push_back(AT); 3334 AdjustedTypes.InsertNode(AT, InsertPos); 3335 return QualType(AT, 0); 3336 } 3337 3338 /// getBlockPointerType - Return the uniqued reference to the type for 3339 /// a pointer to the specified block. 3340 QualType ASTContext::getBlockPointerType(QualType T) const { 3341 assert(T->isFunctionType() && "block of function types only"); 3342 // Unique pointers, to guarantee there is only one block of a particular 3343 // structure. 3344 llvm::FoldingSetNodeID ID; 3345 BlockPointerType::Profile(ID, T); 3346 3347 void *InsertPos = nullptr; 3348 if (BlockPointerType *PT = 3349 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3350 return QualType(PT, 0); 3351 3352 // If the block pointee type isn't canonical, this won't be a canonical 3353 // type either so fill in the canonical type field. 3354 QualType Canonical; 3355 if (!T.isCanonical()) { 3356 Canonical = getBlockPointerType(getCanonicalType(T)); 3357 3358 // Get the new insert position for the node we care about. 3359 BlockPointerType *NewIP = 3360 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3361 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3362 } 3363 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3364 Types.push_back(New); 3365 BlockPointerTypes.InsertNode(New, InsertPos); 3366 return QualType(New, 0); 3367 } 3368 3369 /// getLValueReferenceType - Return the uniqued reference to the type for an 3370 /// lvalue reference to the specified type. 3371 QualType 3372 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3373 assert((!T->isPlaceholderType() || 3374 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3375 "Unresolved placeholder type"); 3376 3377 // Unique pointers, to guarantee there is only one pointer of a particular 3378 // structure. 3379 llvm::FoldingSetNodeID ID; 3380 ReferenceType::Profile(ID, T, SpelledAsLValue); 3381 3382 void *InsertPos = nullptr; 3383 if (LValueReferenceType *RT = 3384 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3385 return QualType(RT, 0); 3386 3387 const auto *InnerRef = T->getAs<ReferenceType>(); 3388 3389 // If the referencee type isn't canonical, this won't be a canonical type 3390 // either, so fill in the canonical type field. 3391 QualType Canonical; 3392 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3393 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3394 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3395 3396 // Get the new insert position for the node we care about. 3397 LValueReferenceType *NewIP = 3398 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3399 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3400 } 3401 3402 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 3403 SpelledAsLValue); 3404 Types.push_back(New); 3405 LValueReferenceTypes.InsertNode(New, InsertPos); 3406 3407 return QualType(New, 0); 3408 } 3409 3410 /// getRValueReferenceType - Return the uniqued reference to the type for an 3411 /// rvalue reference to the specified type. 
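/// A minimal usage sketch (assuming an ASTContext &Ctx is in scope):
/// \code
///   QualType IntRRef = Ctx.getRValueReferenceType(Ctx.IntTy); // int &&
///   assert(IntRRef->isRValueReferenceType());
/// \endcode
/// As with the lvalue form above, asking for the same referencee twice
/// returns the identical uniqued node.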
3412 QualType ASTContext::getRValueReferenceType(QualType T) const { 3413 assert((!T->isPlaceholderType() || 3414 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3415 "Unresolved placeholder type"); 3416 3417 // Unique pointers, to guarantee there is only one pointer of a particular 3418 // structure. 3419 llvm::FoldingSetNodeID ID; 3420 ReferenceType::Profile(ID, T, false); 3421 3422 void *InsertPos = nullptr; 3423 if (RValueReferenceType *RT = 3424 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3425 return QualType(RT, 0); 3426 3427 const auto *InnerRef = T->getAs<ReferenceType>(); 3428 3429 // If the referencee type isn't canonical, this won't be a canonical type 3430 // either, so fill in the canonical type field. 3431 QualType Canonical; 3432 if (InnerRef || !T.isCanonical()) { 3433 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3434 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3435 3436 // Get the new insert position for the node we care about. 3437 RValueReferenceType *NewIP = 3438 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3439 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3440 } 3441 3442 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 3443 Types.push_back(New); 3444 RValueReferenceTypes.InsertNode(New, InsertPos); 3445 return QualType(New, 0); 3446 } 3447 3448 /// getMemberPointerType - Return the uniqued reference to the type for a 3449 /// member pointer to the specified type, in the specified class. 3450 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3451 // Unique pointers, to guarantee there is only one pointer of a particular 3452 // structure. 3453 llvm::FoldingSetNodeID ID; 3454 MemberPointerType::Profile(ID, T, Cls); 3455 3456 void *InsertPos = nullptr; 3457 if (MemberPointerType *PT = 3458 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3459 return QualType(PT, 0); 3460 3461 // If the pointee or class type isn't canonical, this won't be a canonical 3462 // type either, so fill in the canonical type field. 3463 QualType Canonical; 3464 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3465 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3466 3467 // Get the new insert position for the node we care about. 3468 MemberPointerType *NewIP = 3469 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3470 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3471 } 3472 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 3473 Types.push_back(New); 3474 MemberPointerTypes.InsertNode(New, InsertPos); 3475 return QualType(New, 0); 3476 } 3477 3478 /// getConstantArrayType - Return the unique reference to the type for an 3479 /// array of the specified element type. 3480 QualType ASTContext::getConstantArrayType(QualType EltTy, 3481 const llvm::APInt &ArySizeIn, 3482 const Expr *SizeExpr, 3483 ArrayType::ArraySizeModifier ASM, 3484 unsigned IndexTypeQuals) const { 3485 assert((EltTy->isDependentType() || 3486 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3487 "Constant array of VLAs is illegal!"); 3488 3489 // We only need the size as part of the type if it's instantiation-dependent. 3490 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3491 SizeExpr = nullptr; 3492 3493 // Convert the array size into a canonical width matching the pointer size for 3494 // the target. 
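  // For example, a bound written as a 32-bit literal and one written as a
  // 64-bit literal both become the same-width APInt after zextOrTrunc below,
  // so `int[10]` profiles to a single ConstantArrayType node either way.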
3495 llvm::APInt ArySize(ArySizeIn); 3496 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3497 3498 llvm::FoldingSetNodeID ID; 3499 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3500 IndexTypeQuals); 3501 3502 void *InsertPos = nullptr; 3503 if (ConstantArrayType *ATP = 3504 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3505 return QualType(ATP, 0); 3506 3507 // If the element type isn't canonical or has qualifiers, or the array bound 3508 // is instantiation-dependent, this won't be a canonical type either, so fill 3509 // in the canonical type field. 3510 QualType Canon; 3511 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3512 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3513 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3514 ASM, IndexTypeQuals); 3515 Canon = getQualifiedType(Canon, canonSplit.Quals); 3516 3517 // Get the new insert position for the node we care about. 3518 ConstantArrayType *NewIP = 3519 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3520 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3521 } 3522 3523 void *Mem = Allocate( 3524 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3525 TypeAlignment); 3526 auto *New = new (Mem) 3527 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3528 ConstantArrayTypes.InsertNode(New, InsertPos); 3529 Types.push_back(New); 3530 return QualType(New, 0); 3531 } 3532 3533 /// getVariableArrayDecayedType - Turns the given type, which may be 3534 /// variably-modified, into the corresponding type with all the known 3535 /// sizes replaced with [*]. 3536 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3537 // Vastly most common case. 3538 if (!type->isVariablyModifiedType()) return type; 3539 3540 QualType result; 3541 3542 SplitQualType split = type.getSplitDesugaredType(); 3543 const Type *ty = split.Ty; 3544 switch (ty->getTypeClass()) { 3545 #define TYPE(Class, Base) 3546 #define ABSTRACT_TYPE(Class, Base) 3547 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3548 #include "clang/AST/TypeNodes.inc" 3549 llvm_unreachable("didn't desugar past all non-canonical types?"); 3550 3551 // These types should never be variably-modified. 3552 case Type::Builtin: 3553 case Type::Complex: 3554 case Type::Vector: 3555 case Type::DependentVector: 3556 case Type::ExtVector: 3557 case Type::DependentSizedExtVector: 3558 case Type::ConstantMatrix: 3559 case Type::DependentSizedMatrix: 3560 case Type::DependentAddressSpace: 3561 case Type::ObjCObject: 3562 case Type::ObjCInterface: 3563 case Type::ObjCObjectPointer: 3564 case Type::Record: 3565 case Type::Enum: 3566 case Type::UnresolvedUsing: 3567 case Type::TypeOfExpr: 3568 case Type::TypeOf: 3569 case Type::Decltype: 3570 case Type::UnaryTransform: 3571 case Type::DependentName: 3572 case Type::InjectedClassName: 3573 case Type::TemplateSpecialization: 3574 case Type::DependentTemplateSpecialization: 3575 case Type::TemplateTypeParm: 3576 case Type::SubstTemplateTypeParmPack: 3577 case Type::Auto: 3578 case Type::DeducedTemplateSpecialization: 3579 case Type::PackExpansion: 3580 case Type::BitInt: 3581 case Type::DependentBitInt: 3582 llvm_unreachable("type should never be variably-modified"); 3583 3584 // These types can be variably-modified but should never need to 3585 // further decay. 
3586 case Type::FunctionNoProto: 3587 case Type::FunctionProto: 3588 case Type::BlockPointer: 3589 case Type::MemberPointer: 3590 case Type::Pipe: 3591 return type; 3592 3593 // These types can be variably-modified. All these modifications 3594 // preserve structure except as noted by comments. 3595 // TODO: if we ever care about optimizing VLAs, there are no-op 3596 // optimizations available here. 3597 case Type::Pointer: 3598 result = getPointerType(getVariableArrayDecayedType( 3599 cast<PointerType>(ty)->getPointeeType())); 3600 break; 3601 3602 case Type::LValueReference: { 3603 const auto *lv = cast<LValueReferenceType>(ty); 3604 result = getLValueReferenceType( 3605 getVariableArrayDecayedType(lv->getPointeeType()), 3606 lv->isSpelledAsLValue()); 3607 break; 3608 } 3609 3610 case Type::RValueReference: { 3611 const auto *lv = cast<RValueReferenceType>(ty); 3612 result = getRValueReferenceType( 3613 getVariableArrayDecayedType(lv->getPointeeType())); 3614 break; 3615 } 3616 3617 case Type::Atomic: { 3618 const auto *at = cast<AtomicType>(ty); 3619 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3620 break; 3621 } 3622 3623 case Type::ConstantArray: { 3624 const auto *cat = cast<ConstantArrayType>(ty); 3625 result = getConstantArrayType( 3626 getVariableArrayDecayedType(cat->getElementType()), 3627 cat->getSize(), 3628 cat->getSizeExpr(), 3629 cat->getSizeModifier(), 3630 cat->getIndexTypeCVRQualifiers()); 3631 break; 3632 } 3633 3634 case Type::DependentSizedArray: { 3635 const auto *dat = cast<DependentSizedArrayType>(ty); 3636 result = getDependentSizedArrayType( 3637 getVariableArrayDecayedType(dat->getElementType()), 3638 dat->getSizeExpr(), 3639 dat->getSizeModifier(), 3640 dat->getIndexTypeCVRQualifiers(), 3641 dat->getBracketsRange()); 3642 break; 3643 } 3644 3645 // Turn incomplete types into [*] types. 3646 case Type::IncompleteArray: { 3647 const auto *iat = cast<IncompleteArrayType>(ty); 3648 result = getVariableArrayType( 3649 getVariableArrayDecayedType(iat->getElementType()), 3650 /*size*/ nullptr, 3651 ArrayType::Normal, 3652 iat->getIndexTypeCVRQualifiers(), 3653 SourceRange()); 3654 break; 3655 } 3656 3657 // Turn VLA types into [*] types. 3658 case Type::VariableArray: { 3659 const auto *vat = cast<VariableArrayType>(ty); 3660 result = getVariableArrayType( 3661 getVariableArrayDecayedType(vat->getElementType()), 3662 /*size*/ nullptr, 3663 ArrayType::Star, 3664 vat->getIndexTypeCVRQualifiers(), 3665 vat->getBracketsRange()); 3666 break; 3667 } 3668 } 3669 3670 // Apply the top-level qualifiers from the original. 3671 return getQualifiedType(result, split.Quals); 3672 } 3673 3674 /// getVariableArrayType - Returns a non-unique reference to the type for a 3675 /// variable array of the specified element type. 3676 QualType ASTContext::getVariableArrayType(QualType EltTy, 3677 Expr *NumElts, 3678 ArrayType::ArraySizeModifier ASM, 3679 unsigned IndexTypeQuals, 3680 SourceRange Brackets) const { 3681 // Since we don't unique expressions, it isn't possible to unique VLA's 3682 // that have an expression provided for their size. 3683 QualType Canon; 3684 3685 // Be sure to pull qualifiers off the element type. 
3686 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3687 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3688 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3689 IndexTypeQuals, Brackets); 3690 Canon = getQualifiedType(Canon, canonSplit.Quals); 3691 } 3692 3693 auto *New = new (*this, TypeAlignment) 3694 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3695 3696 VariableArrayTypes.push_back(New); 3697 Types.push_back(New); 3698 return QualType(New, 0); 3699 } 3700 3701 /// getDependentSizedArrayType - Returns a non-unique reference to 3702 /// the type for a dependently-sized array of the specified element 3703 /// type. 3704 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3705 Expr *numElements, 3706 ArrayType::ArraySizeModifier ASM, 3707 unsigned elementTypeQuals, 3708 SourceRange brackets) const { 3709 assert((!numElements || numElements->isTypeDependent() || 3710 numElements->isValueDependent()) && 3711 "Size must be type- or value-dependent!"); 3712 3713 // Dependently-sized array types that do not have a specified number 3714 // of elements will have their sizes deduced from a dependent 3715 // initializer. We do no canonicalization here at all, which is okay 3716 // because they can't be used in most locations. 3717 if (!numElements) { 3718 auto *newType 3719 = new (*this, TypeAlignment) 3720 DependentSizedArrayType(*this, elementType, QualType(), 3721 numElements, ASM, elementTypeQuals, 3722 brackets); 3723 Types.push_back(newType); 3724 return QualType(newType, 0); 3725 } 3726 3727 // Otherwise, we actually build a new type every time, but we 3728 // also build a canonical type. 3729 3730 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3731 3732 void *insertPos = nullptr; 3733 llvm::FoldingSetNodeID ID; 3734 DependentSizedArrayType::Profile(ID, *this, 3735 QualType(canonElementType.Ty, 0), 3736 ASM, elementTypeQuals, numElements); 3737 3738 // Look for an existing type with these properties. 3739 DependentSizedArrayType *canonTy = 3740 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3741 3742 // If we don't have one, build one. 3743 if (!canonTy) { 3744 canonTy = new (*this, TypeAlignment) 3745 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 3746 QualType(), numElements, ASM, elementTypeQuals, 3747 brackets); 3748 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3749 Types.push_back(canonTy); 3750 } 3751 3752 // Apply qualifiers from the element type to the array. 3753 QualType canon = getQualifiedType(QualType(canonTy,0), 3754 canonElementType.Quals); 3755 3756 // If we didn't need extra canonicalization for the element type or the size 3757 // expression, then just use that as our result. 3758 if (QualType(canonElementType.Ty, 0) == elementType && 3759 canonTy->getSizeExpr() == numElements) 3760 return canon; 3761 3762 // Otherwise, we need to build a type which follows the spelling 3763 // of the element type. 
3764 auto *sugaredType 3765 = new (*this, TypeAlignment) 3766 DependentSizedArrayType(*this, elementType, canon, numElements, 3767 ASM, elementTypeQuals, brackets); 3768 Types.push_back(sugaredType); 3769 return QualType(sugaredType, 0); 3770 } 3771 3772 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3773 ArrayType::ArraySizeModifier ASM, 3774 unsigned elementTypeQuals) const { 3775 llvm::FoldingSetNodeID ID; 3776 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3777 3778 void *insertPos = nullptr; 3779 if (IncompleteArrayType *iat = 3780 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3781 return QualType(iat, 0); 3782 3783 // If the element type isn't canonical, this won't be a canonical type 3784 // either, so fill in the canonical type field. We also have to pull 3785 // qualifiers off the element type. 3786 QualType canon; 3787 3788 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3789 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3790 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3791 ASM, elementTypeQuals); 3792 canon = getQualifiedType(canon, canonSplit.Quals); 3793 3794 // Get the new insert position for the node we care about. 3795 IncompleteArrayType *existing = 3796 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3797 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3798 } 3799 3800 auto *newType = new (*this, TypeAlignment) 3801 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3802 3803 IncompleteArrayTypes.InsertNode(newType, insertPos); 3804 Types.push_back(newType); 3805 return QualType(newType, 0); 3806 } 3807 3808 ASTContext::BuiltinVectorTypeInfo 3809 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3810 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3811 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3812 NUMVECTORS}; 3813 3814 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3815 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3816 3817 switch (Ty->getKind()) { 3818 default: 3819 llvm_unreachable("Unsupported builtin vector type"); 3820 case BuiltinType::SveInt8: 3821 return SVE_INT_ELTTY(8, 16, true, 1); 3822 case BuiltinType::SveUint8: 3823 return SVE_INT_ELTTY(8, 16, false, 1); 3824 case BuiltinType::SveInt8x2: 3825 return SVE_INT_ELTTY(8, 16, true, 2); 3826 case BuiltinType::SveUint8x2: 3827 return SVE_INT_ELTTY(8, 16, false, 2); 3828 case BuiltinType::SveInt8x3: 3829 return SVE_INT_ELTTY(8, 16, true, 3); 3830 case BuiltinType::SveUint8x3: 3831 return SVE_INT_ELTTY(8, 16, false, 3); 3832 case BuiltinType::SveInt8x4: 3833 return SVE_INT_ELTTY(8, 16, true, 4); 3834 case BuiltinType::SveUint8x4: 3835 return SVE_INT_ELTTY(8, 16, false, 4); 3836 case BuiltinType::SveInt16: 3837 return SVE_INT_ELTTY(16, 8, true, 1); 3838 case BuiltinType::SveUint16: 3839 return SVE_INT_ELTTY(16, 8, false, 1); 3840 case BuiltinType::SveInt16x2: 3841 return SVE_INT_ELTTY(16, 8, true, 2); 3842 case BuiltinType::SveUint16x2: 3843 return SVE_INT_ELTTY(16, 8, false, 2); 3844 case BuiltinType::SveInt16x3: 3845 return SVE_INT_ELTTY(16, 8, true, 3); 3846 case BuiltinType::SveUint16x3: 3847 return SVE_INT_ELTTY(16, 8, false, 3); 3848 case BuiltinType::SveInt16x4: 3849 return SVE_INT_ELTTY(16, 8, true, 4); 3850 case BuiltinType::SveUint16x4: 3851 return SVE_INT_ELTTY(16, 8, false, 4); 3852 case BuiltinType::SveInt32: 3853 return SVE_INT_ELTTY(32, 4, true, 1); 3854 case 
BuiltinType::SveUint32: 3855 return SVE_INT_ELTTY(32, 4, false, 1); 3856 case BuiltinType::SveInt32x2: 3857 return SVE_INT_ELTTY(32, 4, true, 2); 3858 case BuiltinType::SveUint32x2: 3859 return SVE_INT_ELTTY(32, 4, false, 2); 3860 case BuiltinType::SveInt32x3: 3861 return SVE_INT_ELTTY(32, 4, true, 3); 3862 case BuiltinType::SveUint32x3: 3863 return SVE_INT_ELTTY(32, 4, false, 3); 3864 case BuiltinType::SveInt32x4: 3865 return SVE_INT_ELTTY(32, 4, true, 4); 3866 case BuiltinType::SveUint32x4: 3867 return SVE_INT_ELTTY(32, 4, false, 4); 3868 case BuiltinType::SveInt64: 3869 return SVE_INT_ELTTY(64, 2, true, 1); 3870 case BuiltinType::SveUint64: 3871 return SVE_INT_ELTTY(64, 2, false, 1); 3872 case BuiltinType::SveInt64x2: 3873 return SVE_INT_ELTTY(64, 2, true, 2); 3874 case BuiltinType::SveUint64x2: 3875 return SVE_INT_ELTTY(64, 2, false, 2); 3876 case BuiltinType::SveInt64x3: 3877 return SVE_INT_ELTTY(64, 2, true, 3); 3878 case BuiltinType::SveUint64x3: 3879 return SVE_INT_ELTTY(64, 2, false, 3); 3880 case BuiltinType::SveInt64x4: 3881 return SVE_INT_ELTTY(64, 2, true, 4); 3882 case BuiltinType::SveUint64x4: 3883 return SVE_INT_ELTTY(64, 2, false, 4); 3884 case BuiltinType::SveBool: 3885 return SVE_ELTTY(BoolTy, 16, 1); 3886 case BuiltinType::SveFloat16: 3887 return SVE_ELTTY(HalfTy, 8, 1); 3888 case BuiltinType::SveFloat16x2: 3889 return SVE_ELTTY(HalfTy, 8, 2); 3890 case BuiltinType::SveFloat16x3: 3891 return SVE_ELTTY(HalfTy, 8, 3); 3892 case BuiltinType::SveFloat16x4: 3893 return SVE_ELTTY(HalfTy, 8, 4); 3894 case BuiltinType::SveFloat32: 3895 return SVE_ELTTY(FloatTy, 4, 1); 3896 case BuiltinType::SveFloat32x2: 3897 return SVE_ELTTY(FloatTy, 4, 2); 3898 case BuiltinType::SveFloat32x3: 3899 return SVE_ELTTY(FloatTy, 4, 3); 3900 case BuiltinType::SveFloat32x4: 3901 return SVE_ELTTY(FloatTy, 4, 4); 3902 case BuiltinType::SveFloat64: 3903 return SVE_ELTTY(DoubleTy, 2, 1); 3904 case BuiltinType::SveFloat64x2: 3905 return SVE_ELTTY(DoubleTy, 2, 2); 3906 case BuiltinType::SveFloat64x3: 3907 return SVE_ELTTY(DoubleTy, 2, 3); 3908 case BuiltinType::SveFloat64x4: 3909 return SVE_ELTTY(DoubleTy, 2, 4); 3910 case BuiltinType::SveBFloat16: 3911 return SVE_ELTTY(BFloat16Ty, 8, 1); 3912 case BuiltinType::SveBFloat16x2: 3913 return SVE_ELTTY(BFloat16Ty, 8, 2); 3914 case BuiltinType::SveBFloat16x3: 3915 return SVE_ELTTY(BFloat16Ty, 8, 3); 3916 case BuiltinType::SveBFloat16x4: 3917 return SVE_ELTTY(BFloat16Ty, 8, 4); 3918 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3919 IsSigned) \ 3920 case BuiltinType::Id: \ 3921 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3922 llvm::ElementCount::getScalable(NumEls), NF}; 3923 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3924 case BuiltinType::Id: \ 3925 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3926 llvm::ElementCount::getScalable(NumEls), NF}; 3927 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3928 case BuiltinType::Id: \ 3929 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3930 #include "clang/Basic/RISCVVTypes.def" 3931 } 3932 } 3933 3934 /// getScalableVectorType - Return the unique reference to a scalable vector 3935 /// type of the specified element type and size. VectorType must be a built-in 3936 /// type. 
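/// An illustrative sketch (assuming an AArch64 target with SVE types enabled
/// and an ASTContext &Ctx):
/// \code
///   QualType V = Ctx.getScalableVectorType(Ctx.UnsignedIntTy, 4);
///   // V is the svuint32_t singleton (SveUint32).
/// \endcode
/// If no builtin scalable type matches the element type and count, a null
/// QualType is returned.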
3937 QualType ASTContext::getScalableVectorType(QualType EltTy, 3938 unsigned NumElts) const { 3939 if (Target->hasAArch64SVETypes()) { 3940 uint64_t EltTySize = getTypeSize(EltTy); 3941 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3942 IsSigned, IsFP, IsBF) \ 3943 if (!EltTy->isBooleanType() && \ 3944 ((EltTy->hasIntegerRepresentation() && \ 3945 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3946 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3947 IsFP && !IsBF) || \ 3948 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3949 IsBF && !IsFP)) && \ 3950 EltTySize == ElBits && NumElts == NumEls) { \ 3951 return SingletonId; \ 3952 } 3953 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3954 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3955 return SingletonId; 3956 #include "clang/Basic/AArch64SVEACLETypes.def" 3957 } else if (Target->hasRISCVVTypes()) { 3958 uint64_t EltTySize = getTypeSize(EltTy); 3959 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3960 IsFP) \ 3961 if (!EltTy->isBooleanType() && \ 3962 ((EltTy->hasIntegerRepresentation() && \ 3963 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3964 (EltTy->hasFloatingRepresentation() && IsFP)) && \ 3965 EltTySize == ElBits && NumElts == NumEls) \ 3966 return SingletonId; 3967 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3968 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3969 return SingletonId; 3970 #include "clang/Basic/RISCVVTypes.def" 3971 } 3972 return QualType(); 3973 } 3974 3975 /// getVectorType - Return the unique reference to a vector type of 3976 /// the specified element type and size. VectorType must be a built-in type. 3977 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 3978 VectorType::VectorKind VecKind) const { 3979 assert(vecType->isBuiltinType()); 3980 3981 // Check if we've already instantiated a vector of this type. 3982 llvm::FoldingSetNodeID ID; 3983 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 3984 3985 void *InsertPos = nullptr; 3986 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 3987 return QualType(VTP, 0); 3988 3989 // If the element type isn't canonical, this won't be a canonical type either, 3990 // so fill in the canonical type field. 3991 QualType Canonical; 3992 if (!vecType.isCanonical()) { 3993 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 3994 3995 // Get the new insert position for the node we care about. 
3996 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 3997 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3998 } 3999 auto *New = new (*this, TypeAlignment) 4000 VectorType(vecType, NumElts, Canonical, VecKind); 4001 VectorTypes.InsertNode(New, InsertPos); 4002 Types.push_back(New); 4003 return QualType(New, 0); 4004 } 4005 4006 QualType 4007 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4008 SourceLocation AttrLoc, 4009 VectorType::VectorKind VecKind) const { 4010 llvm::FoldingSetNodeID ID; 4011 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4012 VecKind); 4013 void *InsertPos = nullptr; 4014 DependentVectorType *Canon = 4015 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4016 DependentVectorType *New; 4017 4018 if (Canon) { 4019 New = new (*this, TypeAlignment) DependentVectorType( 4020 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4021 } else { 4022 QualType CanonVecTy = getCanonicalType(VecType); 4023 if (CanonVecTy == VecType) { 4024 New = new (*this, TypeAlignment) DependentVectorType( 4025 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4026 4027 DependentVectorType *CanonCheck = 4028 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4029 assert(!CanonCheck && 4030 "Dependent-sized vector_size canonical type broken"); 4031 (void)CanonCheck; 4032 DependentVectorTypes.InsertNode(New, InsertPos); 4033 } else { 4034 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4035 SourceLocation(), VecKind); 4036 New = new (*this, TypeAlignment) DependentVectorType( 4037 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4038 } 4039 } 4040 4041 Types.push_back(New); 4042 return QualType(New, 0); 4043 } 4044 4045 /// getExtVectorType - Return the unique reference to an extended vector type of 4046 /// the specified element type and size. VectorType must be a built-in type. 4047 QualType 4048 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { 4049 assert(vecType->isBuiltinType() || vecType->isDependentType()); 4050 4051 // Check if we've already instantiated a vector of this type. 4052 llvm::FoldingSetNodeID ID; 4053 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4054 VectorType::GenericVector); 4055 void *InsertPos = nullptr; 4056 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4057 return QualType(VTP, 0); 4058 4059 // If the element type isn't canonical, this won't be a canonical type either, 4060 // so fill in the canonical type field. 4061 QualType Canonical; 4062 if (!vecType.isCanonical()) { 4063 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4064 4065 // Get the new insert position for the node we care about. 
4066 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4067 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4068 } 4069 auto *New = new (*this, TypeAlignment) 4070 ExtVectorType(vecType, NumElts, Canonical); 4071 VectorTypes.InsertNode(New, InsertPos); 4072 Types.push_back(New); 4073 return QualType(New, 0); 4074 } 4075 4076 QualType 4077 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4078 Expr *SizeExpr, 4079 SourceLocation AttrLoc) const { 4080 llvm::FoldingSetNodeID ID; 4081 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4082 SizeExpr); 4083 4084 void *InsertPos = nullptr; 4085 DependentSizedExtVectorType *Canon 4086 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4087 DependentSizedExtVectorType *New; 4088 if (Canon) { 4089 // We already have a canonical version of this array type; use it as 4090 // the canonical type for a newly-built type. 4091 New = new (*this, TypeAlignment) 4092 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 4093 SizeExpr, AttrLoc); 4094 } else { 4095 QualType CanonVecTy = getCanonicalType(vecType); 4096 if (CanonVecTy == vecType) { 4097 New = new (*this, TypeAlignment) 4098 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 4099 AttrLoc); 4100 4101 DependentSizedExtVectorType *CanonCheck 4102 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4103 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4104 (void)CanonCheck; 4105 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4106 } else { 4107 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4108 SourceLocation()); 4109 New = new (*this, TypeAlignment) DependentSizedExtVectorType( 4110 *this, vecType, CanonExtTy, SizeExpr, AttrLoc); 4111 } 4112 } 4113 4114 Types.push_back(New); 4115 return QualType(New, 0); 4116 } 4117 4118 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4119 unsigned NumColumns) const { 4120 llvm::FoldingSetNodeID ID; 4121 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4122 Type::ConstantMatrix); 4123 4124 assert(MatrixType::isValidElementType(ElementTy) && 4125 "need a valid element type"); 4126 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4127 ConstantMatrixType::isDimensionValid(NumColumns) && 4128 "need valid matrix dimensions"); 4129 void *InsertPos = nullptr; 4130 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4131 return QualType(MTP, 0); 4132 4133 QualType Canonical; 4134 if (!ElementTy.isCanonical()) { 4135 Canonical = 4136 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4137 4138 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4139 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4140 (void)NewIP; 4141 } 4142 4143 auto *New = new (*this, TypeAlignment) 4144 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4145 MatrixTypes.InsertNode(New, InsertPos); 4146 Types.push_back(New); 4147 return QualType(New, 0); 4148 } 4149 4150 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4151 Expr *RowExpr, 4152 Expr *ColumnExpr, 4153 SourceLocation AttrLoc) const { 4154 QualType CanonElementTy = getCanonicalType(ElementTy); 4155 llvm::FoldingSetNodeID ID; 4156 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4157 ColumnExpr); 4158 4159 void *InsertPos = nullptr; 4160 DependentSizedMatrixType 
      *Canon = DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
        *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // Already have a canonical version of the matrix type.
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for newly-built type.
  DependentSizedMatrixType *New = new (*this, TypeAlignment)
      DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
      DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
        DependentAddressSpaceType(*this, canonPointeeType,
                                  QualType(), AddrSpaceExpr, AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
                                  AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  QualType Canonical;
  if (!isCanonicalResultType(ResultTy)) {
    Canonical =
        getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);

    // Get the new insert position for the node we care about.
4252 FunctionNoProtoType *NewIP = 4253 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4254 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4255 } 4256 4257 auto *New = new (*this, TypeAlignment) 4258 FunctionNoProtoType(ResultTy, Canonical, Info); 4259 Types.push_back(New); 4260 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4261 return QualType(New, 0); 4262 } 4263 4264 CanQualType 4265 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4266 CanQualType CanResultType = getCanonicalType(ResultType); 4267 4268 // Canonical result types do not have ARC lifetime qualifiers. 4269 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4270 Qualifiers Qs = CanResultType.getQualifiers(); 4271 Qs.removeObjCLifetime(); 4272 return CanQualType::CreateUnsafe( 4273 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4274 } 4275 4276 return CanResultType; 4277 } 4278 4279 static bool isCanonicalExceptionSpecification( 4280 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4281 if (ESI.Type == EST_None) 4282 return true; 4283 if (!NoexceptInType) 4284 return false; 4285 4286 // C++17 onwards: exception specification is part of the type, as a simple 4287 // boolean "can this function type throw". 4288 if (ESI.Type == EST_BasicNoexcept) 4289 return true; 4290 4291 // A noexcept(expr) specification is (possibly) canonical if expr is 4292 // value-dependent. 4293 if (ESI.Type == EST_DependentNoexcept) 4294 return true; 4295 4296 // A dynamic exception specification is canonical if it only contains pack 4297 // expansions (so we can't tell whether it's non-throwing) and all its 4298 // contained types are canonical. 4299 if (ESI.Type == EST_Dynamic) { 4300 bool AnyPackExpansions = false; 4301 for (QualType ET : ESI.Exceptions) { 4302 if (!ET.isCanonical()) 4303 return false; 4304 if (ET->getAs<PackExpansionType>()) 4305 AnyPackExpansions = true; 4306 } 4307 return AnyPackExpansions; 4308 } 4309 4310 return false; 4311 } 4312 4313 QualType ASTContext::getFunctionTypeInternal( 4314 QualType ResultTy, ArrayRef<QualType> ArgArray, 4315 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4316 size_t NumArgs = ArgArray.size(); 4317 4318 // Unique functions, to guarantee there is only one function of a particular 4319 // structure. 4320 llvm::FoldingSetNodeID ID; 4321 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4322 *this, true); 4323 4324 QualType Canonical; 4325 bool Unique = false; 4326 4327 void *InsertPos = nullptr; 4328 if (FunctionProtoType *FPT = 4329 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4330 QualType Existing = QualType(FPT, 0); 4331 4332 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4333 // it so long as our exception specification doesn't contain a dependent 4334 // noexcept expression, or we're just looking for a canonical type. 4335 // Otherwise, we're going to need to create a type 4336 // sugar node to hold the concrete expression. 4337 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4338 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4339 return Existing; 4340 4341 // We need a new type sugar node for this one, to hold the new noexcept 4342 // expression. We do no canonicalization here, but that's OK since we don't 4343 // expect to see the same noexcept expression much more than once. 
4344 Canonical = getCanonicalType(Existing); 4345 Unique = true; 4346 } 4347 4348 bool NoexceptInType = getLangOpts().CPlusPlus17; 4349 bool IsCanonicalExceptionSpec = 4350 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4351 4352 // Determine whether the type being created is already canonical or not. 4353 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4354 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4355 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4356 if (!ArgArray[i].isCanonicalAsParam()) 4357 isCanonical = false; 4358 4359 if (OnlyWantCanonical) 4360 assert(isCanonical && 4361 "given non-canonical parameters constructing canonical type"); 4362 4363 // If this type isn't canonical, get the canonical version of it if we don't 4364 // already have it. The exception spec is only partially part of the 4365 // canonical type, and only in C++17 onwards. 4366 if (!isCanonical && Canonical.isNull()) { 4367 SmallVector<QualType, 16> CanonicalArgs; 4368 CanonicalArgs.reserve(NumArgs); 4369 for (unsigned i = 0; i != NumArgs; ++i) 4370 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4371 4372 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4373 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4374 CanonicalEPI.HasTrailingReturn = false; 4375 4376 if (IsCanonicalExceptionSpec) { 4377 // Exception spec is already OK. 4378 } else if (NoexceptInType) { 4379 switch (EPI.ExceptionSpec.Type) { 4380 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4381 // We don't know yet. It shouldn't matter what we pick here; no-one 4382 // should ever look at this. 4383 LLVM_FALLTHROUGH; 4384 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4385 CanonicalEPI.ExceptionSpec.Type = EST_None; 4386 break; 4387 4388 // A dynamic exception specification is almost always "not noexcept", 4389 // with the exception that a pack expansion might expand to no types. 4390 case EST_Dynamic: { 4391 bool AnyPacks = false; 4392 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4393 if (ET->getAs<PackExpansionType>()) 4394 AnyPacks = true; 4395 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4396 } 4397 if (!AnyPacks) 4398 CanonicalEPI.ExceptionSpec.Type = EST_None; 4399 else { 4400 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4401 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4402 } 4403 break; 4404 } 4405 4406 case EST_DynamicNone: 4407 case EST_BasicNoexcept: 4408 case EST_NoexceptTrue: 4409 case EST_NoThrow: 4410 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4411 break; 4412 4413 case EST_DependentNoexcept: 4414 llvm_unreachable("dependent noexcept is already canonical"); 4415 } 4416 } else { 4417 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4418 } 4419 4420 // Adjust the canonical function result type. 4421 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4422 Canonical = 4423 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4424 4425 // Get the new insert position for the node we care about. 4426 FunctionProtoType *NewIP = 4427 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4428 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4429 } 4430 4431 // Compute the needed size to hold this FunctionProtoType and the 4432 // various trailing objects. 
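  // The counts passed to totalSizeToAlloc below mirror FunctionProtoType's
  // trailing objects: one QualType per parameter, an ellipsis SourceLocation
  // only for variadic prototypes, the optional extra bitfields, then the
  // exception-spec payload (exception types, a noexcept Expr*, or source
  // FunctionDecl*s), any ExtParameterInfos, and a trailing Qualifiers object
  // when there are non-fast method qualifiers.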
4433 auto ESH = FunctionProtoType::getExceptionSpecSize( 4434 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4435 size_t Size = FunctionProtoType::totalSizeToAlloc< 4436 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4437 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4438 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4439 NumArgs, EPI.Variadic, 4440 FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type), 4441 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4442 EPI.ExtParameterInfos ? NumArgs : 0, 4443 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4444 4445 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); 4446 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4447 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4448 Types.push_back(FTP); 4449 if (!Unique) 4450 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4451 return QualType(FTP, 0); 4452 } 4453 4454 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4455 llvm::FoldingSetNodeID ID; 4456 PipeType::Profile(ID, T, ReadOnly); 4457 4458 void *InsertPos = nullptr; 4459 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4460 return QualType(PT, 0); 4461 4462 // If the pipe element type isn't canonical, this won't be a canonical type 4463 // either, so fill in the canonical type field. 4464 QualType Canonical; 4465 if (!T.isCanonical()) { 4466 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4467 4468 // Get the new insert position for the node we care about. 4469 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4470 assert(!NewIP && "Shouldn't be in the map!"); 4471 (void)NewIP; 4472 } 4473 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); 4474 Types.push_back(New); 4475 PipeTypes.InsertNode(New, InsertPos); 4476 return QualType(New, 0); 4477 } 4478 4479 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4480 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4481 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4482 : Ty; 4483 } 4484 4485 QualType ASTContext::getReadPipeType(QualType T) const { 4486 return getPipeType(T, true); 4487 } 4488 4489 QualType ASTContext::getWritePipeType(QualType T) const { 4490 return getPipeType(T, false); 4491 } 4492 4493 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4494 llvm::FoldingSetNodeID ID; 4495 BitIntType::Profile(ID, IsUnsigned, NumBits); 4496 4497 void *InsertPos = nullptr; 4498 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4499 return QualType(EIT, 0); 4500 4501 auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits); 4502 BitIntTypes.InsertNode(New, InsertPos); 4503 Types.push_back(New); 4504 return QualType(New, 0); 4505 } 4506 4507 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4508 Expr *NumBitsExpr) const { 4509 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4510 llvm::FoldingSetNodeID ID; 4511 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4512 4513 void *InsertPos = nullptr; 4514 if (DependentBitIntType *Existing = 4515 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4516 return QualType(Existing, 0); 4517 4518 auto *New = new (*this, TypeAlignment) 4519 DependentBitIntType(*this, IsUnsigned, NumBitsExpr); 4520 DependentBitIntTypes.InsertNode(New, InsertPos); 4521 4522 Types.push_back(New); 4523 return QualType(New, 0); 4524 } 4525 4526 #ifndef NDEBUG 4527 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4528 if (!isa<CXXRecordDecl>(D)) return false; 4529 const auto *RD = cast<CXXRecordDecl>(D); 4530 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4531 return true; 4532 if (RD->getDescribedClassTemplate() && 4533 !isa<ClassTemplateSpecializationDecl>(RD)) 4534 return true; 4535 return false; 4536 } 4537 #endif 4538 4539 /// getInjectedClassNameType - Return the unique reference to the 4540 /// injected class name type for the specified templated declaration. 4541 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4542 QualType TST) const { 4543 assert(NeedsInjectedClassNameType(Decl)); 4544 if (Decl->TypeForDecl) { 4545 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4546 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4547 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4548 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4549 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4550 } else { 4551 Type *newType = 4552 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 4553 Decl->TypeForDecl = newType; 4554 Types.push_back(newType); 4555 } 4556 return QualType(Decl->TypeForDecl, 0); 4557 } 4558 4559 /// getTypeDeclType - Return the unique reference to the type for the 4560 /// specified type declaration. 
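/// This is the out-of-line slow path: the inline getTypeDeclType wrapper in
/// ASTContext.h returns the cached Decl->TypeForDecl directly when it has
/// already been created, and only calls into this function the first time a
/// declaration's type is requested.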
4561 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4562 assert(Decl && "Passed null for Decl param"); 4563 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4564 4565 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4566 return getTypedefType(Typedef); 4567 4568 assert(!isa<TemplateTypeParmDecl>(Decl) && 4569 "Template type parameter types are always available."); 4570 4571 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4572 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4573 assert(!NeedsInjectedClassNameType(Record)); 4574 return getRecordType(Record); 4575 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4576 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4577 return getEnumType(Enum); 4578 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4579 return getUnresolvedUsingType(Using); 4580 } else 4581 llvm_unreachable("TypeDecl without a type?"); 4582 4583 return QualType(Decl->TypeForDecl, 0); 4584 } 4585 4586 /// getTypedefType - Return the unique reference to the type for the 4587 /// specified typedef name decl. 4588 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4589 QualType Underlying) const { 4590 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4591 4592 if (Underlying.isNull()) 4593 Underlying = Decl->getUnderlyingType(); 4594 QualType Canonical = getCanonicalType(Underlying); 4595 auto *newType = new (*this, TypeAlignment) 4596 TypedefType(Type::Typedef, Decl, Underlying, Canonical); 4597 Decl->TypeForDecl = newType; 4598 Types.push_back(newType); 4599 return QualType(newType, 0); 4600 } 4601 4602 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4603 QualType Underlying) const { 4604 llvm::FoldingSetNodeID ID; 4605 UsingType::Profile(ID, Found); 4606 4607 void *InsertPos = nullptr; 4608 UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos); 4609 if (T) 4610 return QualType(T, 0); 4611 4612 assert(!Underlying.hasLocalQualifiers()); 4613 assert(Underlying == getTypeDeclType(cast<TypeDecl>(Found->getTargetDecl()))); 4614 QualType Canon = Underlying.getCanonicalType(); 4615 4616 UsingType *NewType = 4617 new (*this, TypeAlignment) UsingType(Found, Underlying, Canon); 4618 Types.push_back(NewType); 4619 UsingTypes.InsertNode(NewType, InsertPos); 4620 return QualType(NewType, 0); 4621 } 4622 4623 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 4624 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4625 4626 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4627 if (PrevDecl->TypeForDecl) 4628 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4629 4630 auto *newType = new (*this, TypeAlignment) RecordType(Decl); 4631 Decl->TypeForDecl = newType; 4632 Types.push_back(newType); 4633 return QualType(newType, 0); 4634 } 4635 4636 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4637 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4638 4639 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4640 if (PrevDecl->TypeForDecl) 4641 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4642 4643 auto *newType = new (*this, TypeAlignment) EnumType(Decl); 4644 Decl->TypeForDecl = newType; 4645 Types.push_back(newType); 4646 return QualType(newType, 0); 4647 } 4648 4649 QualType ASTContext::getUnresolvedUsingType( 4650 const UnresolvedUsingTypenameDecl *Decl) const { 4651 if (Decl->TypeForDecl) 4652 return 
    return QualType(Decl->TypeForDecl, 0);

  if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
          Decl->getCanonicalDecl())
    if (CanonicalDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);

  Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getAttributedType(attr::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, TypeAlignment)
      AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}

/// Retrieve a substitution-result type.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
                                         QualType Replacement) const {
  assert(Replacement.isCanonical()
         && "replacement types must always be canonical");

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm
    = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    SubstParm = new (*this, TypeAlignment)
        SubstTemplateTypeParmType(Parm, Replacement);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}

/// Retrieve a substitution-result type for a substituted template
/// parameter pack.
QualType ASTContext::getSubstTemplateTypeParmPackType(
                                          const TemplateTypeParmType *Parm,
                                          const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements()) {
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
    assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
  }
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm
        = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  if (!Parm->isCanonicalUnqualified()) {
    Canon = getCanonicalType(QualType(Parm, 0));
    Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
                                             ArgPack);
    SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  auto *SubstParm
    = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
                                                               ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
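
// The getters above and most of the getters below share the same uniquing
// pattern built on llvm::FoldingSet. A minimal sketch of that pattern, using
// Profile/FindNodeOrInsertPos/InsertNode exactly as this file does
// ('SomeType' / 'SomeTypes' stand in for any of the cached node kinds):
//
//   llvm::FoldingSetNodeID ID;
//   SomeType::Profile(ID, /*identity fields*/...);  // hash the node's identity
//   void *InsertPos = nullptr;
//   if (SomeType *T = SomeTypes.FindNodeOrInsertPos(ID, InsertPos))
//     return QualType(T, 0);                         // reuse the existing node
//   auto *New = new (*this, TypeAlignment) SomeType(/*...*/);
//   Types.push_back(New);
//   SomeTypes.InsertNode(New, InsertPos);            // remember it for later
//   return QualType(New, 0);
//
// Whenever a canonical form must be computed first (and that computation may
// itself insert nodes), FindNodeOrInsertPos is re-run to refresh InsertPos
// before inserting the new node.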
/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);

    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}

TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                        const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          const TemplateArgumentListInfo &Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args.arguments())
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}

#ifndef NDEBUG
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  return true;
}
#endif

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getTemplateDecl());

  bool IsTypeAlias =
      Template.getAsTemplateDecl() &&
      isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
    assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;
    CanonType = getCanonicalTemplateSpecializationType(Template, Args);
  }

  // Allocate the (non-canonical) template specialization type, but don't
  // try to unique it: these types typically have location information that
  // we don't unique and don't want to lose.
  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
                       sizeof(TemplateArgument) * Args.size() +
                       (IsTypeAlias ? sizeof(QualType) : 0),
                       TypeAlignment);
  auto *Spec
    = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
                                         IsTypeAlias ? Underlying : QualType());

  Types.push_back(Spec);
  return QualType(Spec, 0);
}

static bool
getCanonicalTemplateArguments(const ASTContext &C,
                              ArrayRef<TemplateArgument> OrigArgs,
                              SmallVectorImpl<TemplateArgument> &CanonArgs) {
  bool AnyNonCanonArgs = false;
  unsigned NumArgs = OrigArgs.size();
  CanonArgs.resize(NumArgs);
  for (unsigned I = 0; I != NumArgs; ++I) {
    const TemplateArgument &OrigArg = OrigArgs[I];
    TemplateArgument &CanonArg = CanonArgs[I];
    CanonArg = C.getCanonicalTemplateArgument(OrigArg);
    if (!CanonArg.structurallyEquals(OrigArg))
      AnyNonCanonArgs = true;
  }
  return AnyNonCanonArgs;
}

QualType ASTContext::getCanonicalTemplateSpecializationType(
    TemplateName Template, ArrayRef<TemplateArgument> Args) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getTemplateDecl());

  // Build the canonical template specialization type.
  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
  SmallVector<TemplateArgument, 4> CanonArgs;
  ::getCanonicalTemplateArguments(*this, Args, CanonArgs);

  // Determine whether this canonical template specialization type already
  // exists.
  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, CanonTemplate,
                                      CanonArgs, *this);

  void *InsertPos = nullptr;
  TemplateSpecializationType *Spec
    = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Spec) {
    // Allocate a new canonical template specialization type.
    void *Mem = Allocate((sizeof(TemplateSpecializationType) +
                          sizeof(TemplateArgument) * CanonArgs.size()),
                         TypeAlignment);
    Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
                                                CanonArgs,
                                                QualType(), QualType());
    Types.push_back(Spec);
    TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
  }

  assert(Spec->isDependentType() &&
         "Non-dependent template-id type must have a canonical type");
  return QualType(Spec, 0);
}

QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
                                       NestedNameSpecifier *NNS,
                                       QualType NamedType,
                                       TagDecl *OwnedTagDecl) const {
  llvm::FoldingSetNodeID ID;
  ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);

  void *InsertPos = nullptr;
  ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = NamedType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(NamedType);
    ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Elaborated canonical type broken");
    (void)CheckT;
  }

  void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
                       TypeAlignment);
  T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);

  Types.push_back(T);
  ElaboratedTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, InnerType);

  void *InsertPos = nullptr;
  ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(InnerType);
    ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckT && "Paren canonical type broken");
    (void)CheckT;
  }

  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
  Types.push_back(T);
  ParenTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

QualType
ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
                                  const IdentifierInfo *MacroII) const {
  QualType Canon = UnderlyingTy;
  if (!Canon.isCanonical())
    Canon = getCanonicalType(UnderlyingTy);

  auto *newType = new (*this, TypeAlignment)
      MacroQualifiedType(UnderlyingTy, Canon, MacroII);
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier *NNS,
                                          const IdentifierInfo *Name,
                                          QualType Canon) const {
  if (Canon.isNull()) {
    NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
    if (CanonNNS != NNS)
      Canon = getDependentNameType(Keyword, CanonNNS, Name);
  }

  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);

  void *InsertPos = nullptr;
  DependentNameType *T
    = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(T);
  DependentNameTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
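
// Illustrative example (hypothetical names): a member type written as
// 'typename T::type' inside a template would be requested roughly as
//
//   QualType DNT = Ctx.getDependentNameType(ETK_Typename, NNSForT, TypeII);
//
// where 'Ctx' is this ASTContext, 'NNSForT' is the nested-name-specifier for
// 'T::', and 'TypeII' is the IdentifierInfo for 'type'; identical requests are
// uniqued through DependentNameTypes above.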
QualType
ASTContext::getDependentTemplateSpecializationType(
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifier *NNS,
                                 const IdentifierInfo *Name,
                                 const TemplateArgumentListInfo &Args) const {
  // TODO: avoid this copy
  SmallVector<TemplateArgument, 16> ArgCopy;
  for (unsigned I = 0, E = Args.size(); I != E; ++I)
    ArgCopy.push_back(Args[I].getArgument());
  return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy);
}

QualType
ASTContext::getDependentTemplateSpecializationType(
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifier *NNS,
                                 const IdentifierInfo *Name,
                                 ArrayRef<TemplateArgument> Args) const {
  assert((!NNS || NNS->isDependent()) &&
         "nested-name-specifier must be dependent");

  llvm::FoldingSetNodeID ID;
  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
                                               Name, Args);

  void *InsertPos = nullptr;
  DependentTemplateSpecializationType *T
    = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);

  ElaboratedTypeKeyword CanonKeyword = Keyword;
  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;

  SmallVector<TemplateArgument, 16> CanonArgs;
  bool AnyNonCanonArgs =
      ::getCanonicalTemplateArguments(*this, Args, CanonArgs);

  QualType Canon;
  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
    Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
                                                   Name,
                                                   CanonArgs);

    // Find the insert position again.
    DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
                        sizeof(TemplateArgument) * Args.size()),
                       TypeAlignment);
  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
                                                    Name, Args, Canon);
  Types.push_back(T);
  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
  TemplateArgument Arg;
  if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
    QualType ArgType = getTypeDeclType(TTP);
    if (TTP->isParameterPack())
      ArgType = getPackExpansionType(ArgType, None);

    Arg = TemplateArgument(ArgType);
  } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
    QualType T =
        NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
    // For class NTTPs, ensure we include the 'const' so the type matches that
    // of a real template argument.
    // FIXME: It would be more faithful to model this as something like an
    // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
    if (T->isRecordType())
      T.addConst();
    Expr *E = new (*this) DeclRefExpr(
        *this, NTTP, /*enclosing*/ false, T,
        Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());

    if (NTTP->isParameterPack())
      E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
                                        None);
    Arg = TemplateArgument(E);
  } else {
    auto *TTP = cast<TemplateTemplateParmDecl>(Param);
    if (TTP->isParameterPack())
      Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
    else
      Arg = TemplateArgument(TemplateName(TTP));
  }

  if (Param->isTemplateParameterPack())
    Arg = TemplateArgument::CreatePackCopy(*this, Arg);

  return Arg;
}

void
ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
                                    SmallVectorImpl<TemplateArgument> &Args) {
  Args.reserve(Args.size() + Params->size());

  for (NamedDecl *Param : *Params)
    Args.push_back(getInjectedTemplateArg(Param));
}

QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          Optional<unsigned> NumExpansions,
                                          bool ExpectPackInType) {
  assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
         "Pack expansions must expand one or more parameter packs");

  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);

  void *InsertPos = nullptr;
  PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);

  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions,
                                 /*ExpectPackInType=*/false);

    // Find the insert position again, in case we inserted an element into
    // PackExpansionTypes and invalidated our insert position.
    PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  T = new (*this, TypeAlignment)
      PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(T);
  PackExpansionTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}

/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically.
static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
                            ObjCProtocolDecl *const *RHS) {
  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
}

static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
  if (Protocols.empty()) return true;

  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
    return false;

  for (unsigned i = 1; i != Protocols.size(); ++i)
    if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
        Protocols[i]->getCanonicalDecl() != Protocols[i])
      return false;
  return true;
}

static void
SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
  // Sort protocols, keyed by name.
  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);

  // Canonicalize.
  for (ObjCProtocolDecl *&P : Protocols)
    P = P->getCanonicalDecl();

  // Remove duplicates.
  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
  Protocols.erase(ProtocolsEnd, Protocols.end());
}

QualType ASTContext::getObjCObjectType(QualType BaseType,
                                       ObjCProtocolDecl * const *Protocols,
                                       unsigned NumProtocols) const {
  return getObjCObjectType(BaseType, {},
                           llvm::makeArrayRef(Protocols, NumProtocols),
                           /*isKindOf=*/false);
}

QualType ASTContext::getObjCObjectType(
           QualType baseType,
           ArrayRef<QualType> typeArgs,
           ArrayRef<ObjCProtocolDecl *> protocols,
           bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(baseType))
    return baseType;

  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }

  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized.
  QualType canonical;
  bool typeArgsAreCanonical = llvm::all_of(
      effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); });
  bool protocolsSorted = areSortedAndUniqued(protocols);
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(getCanonicalType(typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }

    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      canonProtocolsVec.append(protocols.begin(), protocols.end());
      SortAndUniqueProtocols(canonProtocolsVec);
      canonProtocols = canonProtocolsVec;
    } else {
      canonProtocols = protocols;
    }

    canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
                                  canonProtocols, isKindOf);

    // Regenerate InsertPos.
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(size, TypeAlignment);
  auto *T =
    new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                 isKindOf);

  Types.push_back(T);
  ObjCObjectTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
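
// Illustrative note: because the canonical form uses a sorted and uniqued
// protocol list (see SortAndUniqueProtocols above), object types written as,
// say, 'NSObject<B, A, A>' and 'NSObject<A, B>' end up sharing one canonical
// ObjCObjectType. 'NSObject', 'A', and 'B' are hypothetical class/protocol
// names used only for this example.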
/// Apply Objective-C protocol qualifiers to the given type.
/// If this is for the canonical type of a type parameter, we can apply
/// protocol qualifiers on the ObjCObjectPointerType.
QualType
ASTContext::applyObjCProtocolQualifiers(QualType type,
                  ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
                  bool allowOnPointerType) const {
  hasError = false;

  if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
    return getObjCTypeParamType(objT->getDecl(), protocols);
  }

  // Apply protocol qualifiers to ObjCObjectPointerType.
  if (allowOnPointerType) {
    if (const auto *objPtr =
            dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
      const ObjCObjectType *objT = objPtr->getObjectType();
      // Merge protocol lists and construct ObjCObjectType.
      SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
      protocolsVec.append(objT->qual_begin(),
                          objT->qual_end());
      protocolsVec.append(protocols.begin(), protocols.end());
      ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
      type = getObjCObjectType(
             objT->getBaseType(),
             objT->getTypeArgsAsWritten(),
             protocols,
             objT->isKindOfTypeAsWritten());
      return getObjCObjectPointerType(type);
    }
  }

  // Apply protocol qualifiers to ObjCObjectType.
  if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
    // FIXME: Check for protocols to which the class type is already
    // known to conform.

    return getObjCObjectType(objT->getBaseType(),
                             objT->getTypeArgsAsWritten(),
                             protocols,
                             objT->isKindOfTypeAsWritten());
  }

  // If the canonical type is ObjCObjectType, ...
  if (type->isObjCObjectType()) {
    // Silently overwrite any existing protocol qualifiers.
    // TODO: determine whether that's the right thing to do.

    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return getObjCObjectType(type, {}, protocols, false);
  }

  // id<protocol-list>
  if (type->isObjCIdType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
                             objPtr->isKindOfType());
    return getObjCObjectPointerType(type);
  }

  // Class<protocol-list>
  if (type->isObjCClassType()) {
    const auto *objPtr = type->castAs<ObjCObjectPointerType>();
    type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
                             objPtr->isKindOfType());
    return getObjCObjectPointerType(type);
  }

  hasError = true;
  return type;
}

QualType
ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                 ArrayRef<ObjCProtocolDecl *> protocols) const {
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
  void *InsertPos = nullptr;
  if (ObjCTypeParamType *TypeParam =
      ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(TypeParam, 0);

  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifiers.
    bool hasError;
    Canonical = getCanonicalType(applyObjCProtocolQualifiers(
        Canonical, protocols, hasError, true /*allowOnPointerType*/));
    assert(!hasError && "Error when applying protocol qualifiers to bound type");
  }

  unsigned size = sizeof(ObjCTypeParamType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(size, TypeAlignment);
  auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);

  Types.push_back(newType);
  ObjCTypeParamTypes.InsertNode(newType, InsertPos);
  return QualType(newType, 0);
}

void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
                                              ObjCTypeParamDecl *New) const {
  New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType()));
  // Update TypeForDecl after updating TypeSourceInfo.
  auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
  SmallVector<ObjCProtocolDecl *, 8> protocols;
  protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
  QualType UpdatedTy = getObjCTypeParamType(New, protocols);
  New->setTypeForDecl(UpdatedTy.getTypePtr());
}

/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
/// protocol list adopt all protocols in QT's qualified-id protocol
/// list.
bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
                                                ObjCInterfaceDecl *IC) {
  if (!QT->isObjCQualifiedIdType())
    return false;

  if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
    // If both the right and left sides have qualifiers.
    for (auto *Proto : OPT->quals()) {
      if (!IC->ClassImplementsProtocol(Proto, false))
        return false;
    }
    return true;
  }
  return false;
}

/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                ObjCInterfaceDecl *IDecl) {
  if (!QT->isObjCQualifiedIdType())
    return false;
  const auto *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  if (!IDecl->hasDefinition())
    return false;
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(IDecl, InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // If every protocol in the id<plist> list conforms to a protocol of
  // IDecl's, then bridge casting is OK.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(Proto, PI)) {
        Conforms = true;
        break;
      }
    }
    if (!Conforms)
      break;
  }
  if (Conforms)
    return true;

  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}

/// getObjCObjectPointerType - Return an ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, ObjectT);

  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *QT =
        ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);

  // Find the canonical object type.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));

    // Regenerate InsertPos.
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  // No match.
  void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
  auto *QType =
    new (Mem) ObjCObjectPointerType(Canonical, ObjectT);

  Types.push_back(QType);
  ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
  return QualType(QType, 0);
}

/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  if (PrevDecl) {
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }

  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;

  void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
  auto *T = new (Mem) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = T;
  Types.push_back(T);
  return QualType(T, 0);
}

/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType ASTs (since expressions are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExprs. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
  TypeOfExprType *toe;
  if (tofExpr->isTypeDependent()) {
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, *this, tofExpr);

    void *InsertPos = nullptr;
    DependentTypeOfExprType *Canon
      = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (Canon) {
      // We already have a "canonical" version of an identical, dependent
      // typeof(expr) type. Use that as our canonical type.
      toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
                                          QualType((TypeOfExprType*)Canon, 0));
    } else {
      // Build a new, canonical typeof(expr) type.
      Canon
        = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
      DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
      toe = Canon;
    }
  } else {
    QualType Canonical = getCanonicalType(tofExpr->getType());
    toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
  }
  Types.push_back(toe);
  return QualType(toe, 0);
}
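
// Illustrative note: since TypeOfExprType nodes are not uniqued, two
// occurrences of 'typeof(x)' yield distinct TypeOfExprType objects that share
// the canonical type computed above, so for equivalent non-dependent
// expressions E1 and E2 of the same type ('Ctx', 'E1', and 'E2' are
// hypothetical locals):
//
//   Ctx.hasSameType(Ctx.getTypeOfExprType(E1), Ctx.getTypeOfExprType(E2))
//
// still holds, which is all the type checker relies on.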
/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
/// TypeOfType nodes. The only motivation to unique these nodes would be
/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfType(QualType tofType) const {
  QualType Canonical = getCanonicalType(tofType);
  auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
  Types.push_back(tot);
  return QualType(tot, 0);
}

/// getReferenceQualifiedType - Given an expr, will return the type for
/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
/// and class member access into account.
QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
  // C++11 [dcl.type.simple]p4:
  //   [...]
  QualType T = E->getType();
  switch (E->getValueKind()) {
  //     - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
  //       type of e;
  case VK_XValue:
    return getRValueReferenceType(T);
  //     - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
  //       type of e;
  case VK_LValue:
    return getLValueReferenceType(T);
  //     - otherwise, decltype(e) is the type of e.
  case VK_PRValue:
    return T;
  }
  llvm_unreachable("Unknown value kind");
}

/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
  DecltypeType *dt;

  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  if (e->isInstantiationDependent()) {
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, *this, e);

    void *InsertPos = nullptr;
    DependentDecltypeType *Canon
      = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // Build a new, canonical decltype(expr) type.
      Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
      DependentDecltypeTypes.InsertNode(Canon, InsertPos);
    }
    dt = new (*this, TypeAlignment)
        DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
  } else {
    dt = new (*this, TypeAlignment)
        DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
  }
  Types.push_back(dt);
  return QualType(dt, 0);
}

/// getUnaryTransformType - We don't unique these, since the memory
/// savings are minimal and these are rare.
QualType ASTContext::getUnaryTransformType(QualType BaseType,
                                           QualType UnderlyingType,
                                           UnaryTransformType::UTTKind Kind)
    const {
  UnaryTransformType *ut = nullptr;

  if (BaseType->isDependentType()) {
    // Look in the folding set for an existing type.
    llvm::FoldingSetNodeID ID;
    DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind);

    void *InsertPos = nullptr;
    DependentUnaryTransformType *Canon
      = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);

    if (!Canon) {
      // Build a new, canonical __underlying_type(type) type.
      Canon = new (*this, TypeAlignment)
          DependentUnaryTransformType(*this, getCanonicalType(BaseType),
                                      Kind);
      DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
    }
    ut = new (*this, TypeAlignment) UnaryTransformType(BaseType,
                                                       QualType(), Kind,
                                                       QualType(Canon, 0));
  } else {
    QualType CanonType = getCanonicalType(UnderlyingType);
    ut = new (*this, TypeAlignment) UnaryTransformType(BaseType,
                                                       UnderlyingType, Kind,
                                                       CanonType);
  }
  Types.push_back(ut);
  return QualType(ut, 0);
}

QualType ASTContext::getAutoTypeInternal(
    QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
    bool IsPack, ConceptDecl *TypeConstraintConcept,
    ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
  if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
      !TypeConstraintConcept && !IsDependent)
    return getAutoDeductType();

  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent,
                    TypeConstraintConcept, TypeConstraintArgs);
  if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  QualType Canon;
  if (!IsCanon) {
    if (DeducedType.isNull()) {
      SmallVector<TemplateArgument, 4> CanonArgs;
      bool AnyNonCanonArgs =
          ::getCanonicalTemplateArguments(*this, TypeConstraintArgs, CanonArgs);
      if (AnyNonCanonArgs) {
        Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack,
                                    TypeConstraintConcept, CanonArgs, true);
        // Find the insert position again.
        AutoTypes.FindNodeOrInsertPos(ID, InsertPos);
      }
    } else {
      Canon = DeducedType.getCanonicalType();
    }
  }

  void *Mem = Allocate(sizeof(AutoType) +
                       sizeof(TemplateArgument) * TypeConstraintArgs.size(),
                       TypeAlignment);
  auto *AT = new (Mem) AutoType(
      DeducedType, Keyword,
      (IsDependent ? TypeDependence::DependentInstantiation
                   : TypeDependence::None) |
          (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
      Canon, TypeConstraintConcept, TypeConstraintArgs);
  Types.push_back(AT);
  AutoTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType
ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
                        bool IsDependent, bool IsPack,
                        ConceptDecl *TypeConstraintConcept,
                        ArrayRef<TemplateArgument> TypeConstraintArgs) const {
  assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
  assert((!IsDependent || DeducedType.isNull()) &&
         "A dependent auto should be undeduced");
  return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
                             TypeConstraintConcept, TypeConstraintArgs);
}
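
// Illustrative note (hypothetical 'Ctx'): for a declaration like
// 'auto x = 42;', successful deduction produces roughly
//
//   QualType Deduced =
//       Ctx.getAutoType(Ctx.IntTy, AutoTypeKeyword::Auto,
//                       /*IsDependent=*/false, /*IsPack=*/false,
//                       /*TypeConstraintConcept=*/nullptr, {});
//
// while an undeduced, unconstrained 'auto' is represented by the single node
// returned from getAutoDeductType() below.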
/// Return the uniqued reference to the deduced template specialization type
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
QualType ASTContext::getDeducedTemplateSpecializationType(
    TemplateName Template, QualType DeducedType, bool IsDependent) const {
  // Look in the folding set for an existing type.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
                                             IsDependent);
  if (DeducedTemplateSpecializationType *DTST =
          DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(DTST, 0);

  auto *DTST = new (*this, TypeAlignment)
      DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
  llvm::FoldingSetNodeID TempID;
  DTST->Profile(TempID);
  assert(ID == TempID && "ID does not match");
  Types.push_back(DTST);
  DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
  return QualType(DTST, 0);
}

/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(AT, 0);

  // If the atomic value type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
  Types.push_back(New);
  AtomicTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getAutoDeductType - Get type pattern for deducing against 'auto'.
QualType ASTContext::getAutoDeductType() const {
  if (AutoDeductTy.isNull())
    AutoDeductTy = QualType(new (*this, TypeAlignment)
                                AutoType(QualType(), AutoTypeKeyword::Auto,
                                         TypeDependence::None, QualType(),
                                         /*concept*/ nullptr, /*args*/ {}),
                            0);
  return AutoDeductTy;
}

/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
QualType ASTContext::getAutoRRefDeductType() const {
  if (AutoRRefDeductTy.isNull())
    AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
  return AutoRRefDeductTy;
}

/// getTagDeclType - Return the unique reference to the type for the
/// specified TagDecl (struct/union/class/enum) decl.
QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
  assert(Decl);
  // FIXME: What is the design on getTagDeclType when it requires casting
  // away const? mutable?
  return getTypeDeclType(const_cast<TagDecl*>(Decl));
}
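
// Illustrative note: the accessors below just translate TargetInfo's choice of
// builtin integer kind into the corresponding CanQualType, so the results are
// target-dependent; e.g. on a typical 64-bit Linux target getSizeType() is
// expected to be 'unsigned long' and getPointerDiffType() 'long'.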
/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
/// needs to agree with the definition in <stddef.h>.
CanQualType ASTContext::getSizeType() const {
  return getFromTargetType(Target->getSizeType());
}

/// Return the unique signed counterpart of the integer type
/// corresponding to size_t.
CanQualType ASTContext::getSignedSizeType() const {
  return getFromTargetType(Target->getSignedSizeType());
}

/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
CanQualType ASTContext::getIntMaxType() const {
  return getFromTargetType(Target->getIntMaxType());
}

/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
CanQualType ASTContext::getUIntMaxType() const {
  return getFromTargetType(Target->getUIntMaxType());
}

/// getSignedWCharType - Return the type of "signed wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getSignedWCharType() const {
  // FIXME: derive from "Target" ?
  return WCharTy;
}

/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getUnsignedWCharType() const {
  // FIXME: derive from "Target" ?
  return UnsignedIntTy;
}

QualType ASTContext::getIntPtrType() const {
  return getFromTargetType(Target->getIntPtrType());
}

QualType ASTContext::getUIntPtrType() const {
  return getCorrespondingUnsignedType(getIntPtrType());
}

/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType ASTContext::getPointerDiffType() const {
  return getFromTargetType(Target->getPtrDiffType(0));
}

/// Return the unique unsigned counterpart of "ptrdiff_t"
/// integer type. The standard (C11 7.21.6.1p7) refers to this type
/// in the definition of %tu format specifier.
QualType ASTContext::getUnsignedPointerDiffType() const {
  return getFromTargetType(Target->getUnsignedPtrDiffType(0));
}

/// Return the unique type for "pid_t" defined in
/// <sys/types.h>. We need this to compute the correct type for vfork().
QualType ASTContext::getProcessIDType() const {
  return getFromTargetType(Target->getProcessIDType());
}

//===----------------------------------------------------------------------===//
//                              Type Operators
//===----------------------------------------------------------------------===//

CanQualType ASTContext::getCanonicalParamType(QualType T) const {
  // Push qualifiers into arrays, and then discard any remaining
  // qualifiers.
  T = getCanonicalType(T);
  T = getVariableArrayDecayedType(T);
  const Type *Ty = T.getTypePtr();
  QualType Result;
  if (isa<ArrayType>(Ty)) {
    Result = getArrayDecayedType(QualType(Ty, 0));
  } else if (isa<FunctionType>(Ty)) {
    Result = getPointerType(QualType(Ty, 0));
  } else {
    Result = QualType(Ty, 0);
  }

  return CanQualType::CreateUnsafe(Result);
}
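
// Illustrative note: getCanonicalParamType applies the usual parameter
// adjustments on top of canonicalization, so for hypothetical inputs:
//
//   int[10]     ->  int *           (array-to-pointer decay)
//   void (int)  ->  void (*)(int)   (function-to-pointer decay)
//   const int   ->  int             (top-level qualifiers dropped)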
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) {
  SplitQualType splitType = type.getSplitUnqualifiedType();

  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const auto *AT =
      dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());

  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);

  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }

  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(splitType.Quals);

  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    return getConstantArrayType(unqualElementType, CAT->getSize(),
                                CAT->getSizeExpr(), CAT->getSizeModifier(), 0);
  }

  if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
    return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
  }

  if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
    return getVariableArrayType(unqualElementType,
                                VAT->getSizeExpr(),
                                VAT->getSizeModifier(),
                                VAT->getIndexTypeCVRQualifiers(),
                                VAT->getBracketsRange());
  }

  const auto *DSAT = cast<DependentSizedArrayType>(AT);
  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
                                    DSAT->getSizeModifier(), 0,
                                    SourceRange());
}

/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
                                         bool AllowPiMismatch) {
  while (true) {
    auto *AT1 = getAsArrayType(T1);
    if (!AT1)
      return;

    auto *AT2 = getAsArrayType(T2);
    if (!AT2)
      return;

    // If we don't have two array types with the same constant bound nor two
    // incomplete array types, we've unwrapped everything we can.
    // C++20 also permits one type to be a constant array type and the other
    // to be an incomplete array type.
    // FIXME: Consider also unwrapping array of unknown bound and VLA.
    if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
      auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
      if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<IncompleteArrayType>(AT2))))
        return;
    } else if (isa<IncompleteArrayType>(AT1)) {
      if (!(isa<IncompleteArrayType>(AT2) ||
            (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
             isa<ConstantArrayType>(AT2))))
        return;
    } else {
      return;
    }

    T1 = AT1->getElementType();
    T2 = AT2->getElementType();
  }
}
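
// Illustrative note on the cv-decomposition walk: starting from the
// hypothetical pair
//
//   T1 = const int **      T2 = int **
//
// each call to UnwrapSimilarTypes (below) strips one pointer level, yielding
// 'const int *' / 'int *' and then 'const int' / 'int', so hasSimilarType
// reports the original pair as similar because the only remaining difference
// is in qualification.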
/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
///
/// If T1 and T2 are both pointer types of the same kind, or both array types
/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
///
/// This function will typically be called in a loop that successively
/// "unwraps" pointer and pointer-to-member types to compare them at each
/// level.
///
/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
///        C++20 [conv.qual], if permitted by the current language mode.
///
/// \return \c true if a pointer type was unwrapped, \c false if we reached a
/// pair of types that can't be unwrapped further.
bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
                                    bool AllowPiMismatch) {
  UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);

  const auto *T1PtrType = T1->getAs<PointerType>();
  const auto *T2PtrType = T2->getAs<PointerType>();
  if (T1PtrType && T2PtrType) {
    T1 = T1PtrType->getPointeeType();
    T2 = T2PtrType->getPointeeType();
    return true;
  }

  const auto *T1MPType = T1->getAs<MemberPointerType>();
  const auto *T2MPType = T2->getAs<MemberPointerType>();
  if (T1MPType && T2MPType &&
      hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
                             QualType(T2MPType->getClass(), 0))) {
    T1 = T1MPType->getPointeeType();
    T2 = T2MPType->getPointeeType();
    return true;
  }

  if (getLangOpts().ObjC) {
    const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
    const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
    if (T1OPType && T2OPType) {
      T1 = T1OPType->getPointeeType();
      T2 = T2OPType->getPointeeType();
      return true;
    }
  }

  // FIXME: Block pointers, too?

  return false;
}

bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
  while (true) {
    Qualifiers Quals;
    T1 = getUnqualifiedArrayType(T1, Quals);
    T2 = getUnqualifiedArrayType(T2, Quals);
    if (hasSameType(T1, T2))
      return true;
    if (!UnwrapSimilarTypes(T1, T2))
      return false;
  }
}

bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
  while (true) {
    Qualifiers Quals1, Quals2;
    T1 = getUnqualifiedArrayType(T1, Quals1);
    T2 = getUnqualifiedArrayType(T2, Quals2);

    Quals1.removeCVRQualifiers();
    Quals2.removeCVRQualifiers();
    if (Quals1 != Quals2)
      return false;

    if (hasSameType(T1, T2))
      return true;

    if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
      return false;
  }
}

DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);

  case TemplateName::OverloadedTemplate: {
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }

  case TemplateName::AssumedTemplate: {
    AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
    return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
  }

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    DeclarationName DName;
    if (DTN->isIdentifier()) {
      DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc =
          DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange());
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }

  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  }

  llvm_unreachable("bad template name kind!");
}

TemplateName
ASTContext::getCanonicalTemplateName(const TemplateName &Name) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);

    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
  }

  case TemplateName::OverloadedTemplate:
  case TemplateName::AssumedTemplate:
    llvm_unreachable("cannot canonicalize unresolved template");

  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    return DTN->CanonicalTemplateName;
  }

  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return getCanonicalTemplateName(subst->getReplacement());
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    TemplateTemplateParmDecl *canonParameter
      = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
    TemplateArgument canonArgPack
      = getCanonicalTemplateArgument(subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
  }
  }

  llvm_unreachable("bad template name!");
}

bool ASTContext::hasSameTemplateName(const TemplateName &X,
                                     const TemplateName &Y) const {
  return getCanonicalTemplateName(X).getAsVoidPointer() ==
         getCanonicalTemplateName(Y).getAsVoidPointer();
}

bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
                                         const NamedDecl *Y) {
  if (X->getKind() != Y->getKind())
    return false;

  if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
    auto *TY = cast<TemplateTypeParmDecl>(Y);
    if (TX->isParameterPack() != TY->isParameterPack())
      return false;
    if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
      return false;
    const TypeConstraint *TXTC = TX->getTypeConstraint();
    const TypeConstraint *TYTC = TY->getTypeConstraint();
    if (!TXTC != !TYTC)
      return false;
    if (TXTC && TYTC) {
      auto *NCX = TXTC->getNamedConcept();
      auto *NCY = TYTC->getNamedConcept();
      if (!NCX || !NCY || !isSameEntity(NCX, NCY))
        return false;
      if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs())
        return false;
      if (TXTC->hasExplicitTemplateArgs()) {
        auto *TXTCArgs = TXTC->getTemplateArgsAsWritten();
        auto *TYTCArgs = TYTC->getTemplateArgsAsWritten();
        if (TXTCArgs->NumTemplateArgs != TYTCArgs->NumTemplateArgs)
          return false;
        llvm::FoldingSetNodeID XID, YID;
        for (auto &ArgLoc : TXTCArgs->arguments())
          ArgLoc.getArgument().Profile(XID, X->getASTContext());
        for (auto &ArgLoc : TYTCArgs->arguments())
          ArgLoc.getArgument().Profile(YID, Y->getASTContext());
        if (XID != YID)
          return false;
      }
    }
    return true;
  }

  if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
    auto *TY = cast<NonTypeTemplateParmDecl>(Y);
    return TX->isParameterPack() == TY->isParameterPack() &&
           TX->getASTContext().hasSameType(TX->getType(), TY->getType());
  }

  auto *TX = cast<TemplateTemplateParmDecl>(X);
  auto *TY = cast<TemplateTemplateParmDecl>(Y);
  return TX->isParameterPack() == TY->isParameterPack() &&
         isSameTemplateParameterList(TX->getTemplateParameters(),
                                     TY->getTemplateParameters());
}

bool ASTContext::isSameTemplateParameterList(const TemplateParameterList *X,
                                             const TemplateParameterList *Y) {
(X->size() != Y->size()) 6211 return false; 6212 6213 for (unsigned I = 0, N = X->size(); I != N; ++I) 6214 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6215 return false; 6216 6217 const Expr *XRC = X->getRequiresClause(); 6218 const Expr *YRC = Y->getRequiresClause(); 6219 if (!XRC != !YRC) 6220 return false; 6221 if (XRC) { 6222 llvm::FoldingSetNodeID XRCID, YRCID; 6223 XRC->Profile(XRCID, *this, /*Canonical=*/true); 6224 YRC->Profile(YRCID, *this, /*Canonical=*/true); 6225 if (XRCID != YRCID) 6226 return false; 6227 } 6228 6229 return true; 6230 } 6231 6232 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6233 if (auto *NS = X->getAsNamespace()) 6234 return NS; 6235 if (auto *NAS = X->getAsNamespaceAlias()) 6236 return NAS->getNamespace(); 6237 return nullptr; 6238 } 6239 6240 static bool isSameQualifier(const NestedNameSpecifier *X, 6241 const NestedNameSpecifier *Y) { 6242 if (auto *NSX = getNamespace(X)) { 6243 auto *NSY = getNamespace(Y); 6244 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6245 return false; 6246 } else if (X->getKind() != Y->getKind()) 6247 return false; 6248 6249 // FIXME: For namespaces and types, we're permitted to check that the entity 6250 // is named via the same tokens. We should probably do so. 6251 switch (X->getKind()) { 6252 case NestedNameSpecifier::Identifier: 6253 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6254 return false; 6255 break; 6256 case NestedNameSpecifier::Namespace: 6257 case NestedNameSpecifier::NamespaceAlias: 6258 // We've already checked that we named the same namespace. 6259 break; 6260 case NestedNameSpecifier::TypeSpec: 6261 case NestedNameSpecifier::TypeSpecWithTemplate: 6262 if (X->getAsType()->getCanonicalTypeInternal() != 6263 Y->getAsType()->getCanonicalTypeInternal()) 6264 return false; 6265 break; 6266 case NestedNameSpecifier::Global: 6267 case NestedNameSpecifier::Super: 6268 return true; 6269 } 6270 6271 // Recurse into earlier portion of NNS, if any. 6272 auto *PX = X->getPrefix(); 6273 auto *PY = Y->getPrefix(); 6274 if (PX && PY) 6275 return isSameQualifier(PX, PY); 6276 return !PX && !PY; 6277 } 6278 6279 /// Determine whether the attributes we can overload on are identical for A and 6280 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6281 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6282 const FunctionDecl *B) { 6283 // Note that pass_object_size attributes are represented in the function's 6284 // ExtParameterInfo, so we don't need to check them here. 6285 6286 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6287 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6288 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6289 6290 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6291 Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6292 Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6293 6294 // Return false if the number of enable_if attributes is different. 6295 if (!Cand1A || !Cand2A) 6296 return false; 6297 6298 Cand1ID.clear(); 6299 Cand2ID.clear(); 6300 6301 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6302 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6303 6304 // Return false if any of the enable_if expressions of A and B are 6305 // different. 
6306 if (Cand1ID != Cand2ID) 6307 return false; 6308 } 6309 return true; 6310 } 6311 6312 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) { 6313 if (X == Y) 6314 return true; 6315 6316 if (X->getDeclName() != Y->getDeclName()) 6317 return false; 6318 6319 // Must be in the same context. 6320 // 6321 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6322 // could be two different declarations of the same function. (We will fix the 6323 // semantic DC to refer to the primary definition after merging.) 6324 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6325 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6326 return false; 6327 6328 // Two typedefs refer to the same entity if they have the same underlying 6329 // type. 6330 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6331 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6332 return hasSameType(TypedefX->getUnderlyingType(), 6333 TypedefY->getUnderlyingType()); 6334 6335 // Must have the same kind. 6336 if (X->getKind() != Y->getKind()) 6337 return false; 6338 6339 // Objective-C classes and protocols with the same name always match. 6340 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6341 return true; 6342 6343 if (isa<ClassTemplateSpecializationDecl>(X)) { 6344 // No need to handle these here: we merge them when adding them to the 6345 // template. 6346 return false; 6347 } 6348 6349 // Compatible tags match. 6350 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6351 const auto *TagY = cast<TagDecl>(Y); 6352 return (TagX->getTagKind() == TagY->getTagKind()) || 6353 ((TagX->getTagKind() == TTK_Struct || 6354 TagX->getTagKind() == TTK_Class || 6355 TagX->getTagKind() == TTK_Interface) && 6356 (TagY->getTagKind() == TTK_Struct || 6357 TagY->getTagKind() == TTK_Class || 6358 TagY->getTagKind() == TTK_Interface)); 6359 } 6360 6361 // Functions with the same type and linkage match. 6362 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6363 // functions, etc. 6364 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) { 6365 const auto *FuncY = cast<FunctionDecl>(Y); 6366 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) { 6367 const auto *CtorY = cast<CXXConstructorDecl>(Y); 6368 if (CtorX->getInheritedConstructor() && 6369 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(), 6370 CtorY->getInheritedConstructor().getConstructor())) 6371 return false; 6372 } 6373 6374 if (FuncX->isMultiVersion() != FuncY->isMultiVersion()) 6375 return false; 6376 6377 // Multiversioned functions with different feature strings are represented 6378 // as separate declarations. 6379 if (FuncX->isMultiVersion()) { 6380 const auto *TAX = FuncX->getAttr<TargetAttr>(); 6381 const auto *TAY = FuncY->getAttr<TargetAttr>(); 6382 assert(TAX && TAY && "Multiversion Function without target attribute"); 6383 6384 if (TAX->getFeaturesStr() != TAY->getFeaturesStr()) 6385 return false; 6386 } 6387 6388 const Expr *XRC = FuncX->getTrailingRequiresClause(); 6389 const Expr *YRC = FuncY->getTrailingRequiresClause(); 6390 if (!XRC != !YRC) 6391 return false; 6392 if (XRC) { 6393 llvm::FoldingSetNodeID XRCID, YRCID; 6394 XRC->Profile(XRCID, *this, /*Canonical=*/true); 6395 YRC->Profile(YRCID, *this, /*Canonical=*/true); 6396 if (XRCID != YRCID) 6397 return false; 6398 } 6399 6400 auto GetTypeAsWritten = [](const FunctionDecl *FD) { 6401 // Map to the first declaration that we've already merged into this one. 
6402 // The TSI of redeclarations might not match (due to calling conventions
6403 // being inherited onto the type but not the TSI), but the TSI type of
6404 // the first declaration of the function should match across modules.
6405 FD = FD->getCanonicalDecl();
6406 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
6407 : FD->getType();
6408 };
6409 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
6410 if (!hasSameType(XT, YT)) {
6411 // We can get functions with different types on the redecl chain in C++17
6412 // if they have differing exception specifications and at least one of
6413 // the exception specs is unresolved.
6414 auto *XFPT = XT->getAs<FunctionProtoType>();
6415 auto *YFPT = YT->getAs<FunctionProtoType>();
6416 if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
6417 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
6418 isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
6419 // FIXME: We could make isSameEntity const after we make
6420 // hasSameFunctionTypeIgnoringExceptionSpec const.
6421 hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
6422 return true;
6423 return false;
6424 }
6425
6426 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
6427 hasSameOverloadableAttrs(FuncX, FuncY);
6428 }
6429
6430 // Variables with the same type and linkage match.
6431 if (const auto *VarX = dyn_cast<VarDecl>(X)) {
6432 const auto *VarY = cast<VarDecl>(Y);
6433 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
6434 if (hasSameType(VarX->getType(), VarY->getType()))
6435 return true;
6436
6437 // We can get decls with different types on the redecl chain. E.g.
6438 // template <typename T> struct S { static T Var[]; }; // #1
6439 // template <typename T> T S<T>::Var[sizeof(T)]; // #2
6440 // This only happens when completing an incomplete array type. In this case
6441 // when comparing #1 and #2 we should go through their element type.
6442 const ArrayType *VarXTy = getAsArrayType(VarX->getType());
6443 const ArrayType *VarYTy = getAsArrayType(VarY->getType());
6444 if (!VarXTy || !VarYTy)
6445 return false;
6446 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
6447 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
6448 }
6449 return false;
6450 }
6451
6452 // Namespaces with the same name and inlinedness match.
6453 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
6454 const auto *NamespaceY = cast<NamespaceDecl>(Y);
6455 return NamespaceX->isInline() == NamespaceY->isInline();
6456 }
6457
6458 // Identical template names and kinds match if their template parameter lists
6459 // and patterns match.
6460 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
6461 const auto *TemplateY = cast<TemplateDecl>(Y);
6462 return isSameEntity(TemplateX->getTemplatedDecl(),
6463 TemplateY->getTemplatedDecl()) &&
6464 isSameTemplateParameterList(TemplateX->getTemplateParameters(),
6465 TemplateY->getTemplateParameters());
6466 }
6467
6468 // Fields with the same name and the same type match.
6469 if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
6470 const auto *FDY = cast<FieldDecl>(Y);
6471 // FIXME: Also check the bitwidth is odr-equivalent, if any.
6472 return hasSameType(FDX->getType(), FDY->getType());
6473 }
6474
6475 // Indirect fields with the same target field match.
6476 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6477 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6478 return IFDX->getAnonField()->getCanonicalDecl() == 6479 IFDY->getAnonField()->getCanonicalDecl(); 6480 } 6481 6482 // Enumerators with the same name match. 6483 if (isa<EnumConstantDecl>(X)) 6484 // FIXME: Also check the value is odr-equivalent. 6485 return true; 6486 6487 // Using shadow declarations with the same target match. 6488 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6489 const auto *USY = cast<UsingShadowDecl>(Y); 6490 return USX->getTargetDecl() == USY->getTargetDecl(); 6491 } 6492 6493 // Using declarations with the same qualifier match. (We already know that 6494 // the name matches.) 6495 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6496 const auto *UY = cast<UsingDecl>(Y); 6497 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6498 UX->hasTypename() == UY->hasTypename() && 6499 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6500 } 6501 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6502 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6503 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6504 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6505 } 6506 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6507 return isSameQualifier( 6508 UX->getQualifier(), 6509 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6510 } 6511 6512 // Using-pack declarations are only created by instantiation, and match if 6513 // they're instantiated from matching UnresolvedUsing...Decls. 6514 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6515 return declaresSameEntity( 6516 UX->getInstantiatedFromUsingDecl(), 6517 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6518 } 6519 6520 // Namespace alias definitions with the same target match. 
6521 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
6522 const auto *NAY = cast<NamespaceAliasDecl>(Y);
6523 return NAX->getNamespace()->Equals(NAY->getNamespace());
6524 }
6525
6526 return false;
6527 }
6528
6529 TemplateArgument
6530 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
6531 switch (Arg.getKind()) {
6532 case TemplateArgument::Null:
6533 return Arg;
6534
6535 case TemplateArgument::Expression:
6536 return Arg;
6537
6538 case TemplateArgument::Declaration: {
6539 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
6540 return TemplateArgument(D, Arg.getParamTypeForDecl());
6541 }
6542
6543 case TemplateArgument::NullPtr:
6544 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
6545 /*isNullPtr*/true);
6546
6547 case TemplateArgument::Template:
6548 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
6549
6550 case TemplateArgument::TemplateExpansion:
6551 return TemplateArgument(getCanonicalTemplateName(
6552 Arg.getAsTemplateOrTemplatePattern()),
6553 Arg.getNumTemplateExpansions());
6554
6555 case TemplateArgument::Integral:
6556 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
6557
6558 case TemplateArgument::Type:
6559 return TemplateArgument(getCanonicalType(Arg.getAsType()));
6560
6561 case TemplateArgument::Pack: {
6562 if (Arg.pack_size() == 0)
6563 return Arg;
6564
6565 auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
6566 unsigned Idx = 0;
6567 for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
6568 AEnd = Arg.pack_end();
6569 A != AEnd; (void)++A, ++Idx)
6570 CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
6571
6572 return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
6573 }
6574 }
6575
6576 // Silence GCC warning
6577 llvm_unreachable("Unhandled template argument kind");
6578 }
6579
6580 NestedNameSpecifier *
6581 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
6582 if (!NNS)
6583 return nullptr;
6584
6585 switch (NNS->getKind()) {
6586 case NestedNameSpecifier::Identifier:
6587 // Canonicalize the prefix but keep the identifier the same.
6588 return NestedNameSpecifier::Create(*this,
6589 getCanonicalNestedNameSpecifier(NNS->getPrefix()),
6590 NNS->getAsIdentifier());
6591
6592 case NestedNameSpecifier::Namespace:
6593 // A namespace is canonical; build a nested-name-specifier with
6594 // this namespace and no prefix.
6595 return NestedNameSpecifier::Create(*this, nullptr,
6596 NNS->getAsNamespace()->getOriginalNamespace());
6597
6598 case NestedNameSpecifier::NamespaceAlias:
6599 // A namespace alias is not canonical; build a nested-name-specifier
6600 // from its underlying namespace, with no prefix.
6601 return NestedNameSpecifier::Create(*this, nullptr,
6602 NNS->getAsNamespaceAlias()->getNamespace()
6603 ->getOriginalNamespace());
6604
6605 // The difference between TypeSpec and TypeSpecWithTemplate is that the
6606 // latter will have the 'template' keyword when printed.
6607 case NestedNameSpecifier::TypeSpec:
6608 case NestedNameSpecifier::TypeSpecWithTemplate: {
6609 const Type *T = getCanonicalType(NNS->getAsType());
6610
6611 // If we have some kind of dependent-named type (e.g., "typename T::type"),
6612 // break it apart into its prefix and identifier, then reconstitute those
6613 // as the canonical nested-name-specifier.
This is required to canonicalize
6614 // a dependent nested-name-specifier involving typedefs of dependent-name
6615 // types, e.g.,
6616 // typedef typename T::type T1;
6617 // typedef typename T1::type T2;
6618 if (const auto *DNT = T->getAs<DependentNameType>())
6619 return NestedNameSpecifier::Create(
6620 *this, DNT->getQualifier(),
6621 const_cast<IdentifierInfo *>(DNT->getIdentifier()));
6622 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
6623 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
6624 const_cast<Type *>(T));
6625
6626 // TODO: Set 'Template' parameter to true for other template types.
6627 return NestedNameSpecifier::Create(*this, nullptr, false,
6628 const_cast<Type *>(T));
6629 }
6630
6631 case NestedNameSpecifier::Global:
6632 case NestedNameSpecifier::Super:
6633 // The global specifier and __super specifier are canonical and unique.
6634 return NNS;
6635 }
6636
6637 llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
6638 }
6639
6640 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
6641 // Handle the non-qualified case efficiently.
6642 if (!T.hasLocalQualifiers()) {
6643 // Handle the common positive case fast.
6644 if (const auto *AT = dyn_cast<ArrayType>(T))
6645 return AT;
6646 }
6647
6648 // Handle the common negative case fast.
6649 if (!isa<ArrayType>(T.getCanonicalType()))
6650 return nullptr;
6651
6652 // Apply any qualifiers from the array type to the element type. This
6653 // implements C99 6.7.3p8: "If the specification of an array type includes
6654 // any type qualifiers, the element type is so qualified, not the array type."
6655
6656 // If we get here, we either have type qualifiers on the type, or we have
6657 // sugar such as a typedef in the way. If we have type qualifiers on the type
6658 // we must propagate them down into the element type.
6659
6660 SplitQualType split = T.getSplitDesugaredType();
6661 Qualifiers qs = split.Quals;
6662
6663 // If we have a simple case, just return now.
6664 const auto *ATy = dyn_cast<ArrayType>(split.Ty);
6665 if (!ATy || qs.empty())
6666 return ATy;
6667
6668 // Otherwise, we have an array and we have qualifiers on it. Push the
6669 // qualifiers into the array element type and return a new array type.
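// For example (illustrative): given 'typedef int A[5];' and a variable of
// type 'const A', the const is pushed down below so the resulting array type
// has element type 'const int'.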
6670 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6671 6672 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6673 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6674 CAT->getSizeExpr(), 6675 CAT->getSizeModifier(), 6676 CAT->getIndexTypeCVRQualifiers())); 6677 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6678 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6679 IAT->getSizeModifier(), 6680 IAT->getIndexTypeCVRQualifiers())); 6681 6682 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6683 return cast<ArrayType>( 6684 getDependentSizedArrayType(NewEltTy, 6685 DSAT->getSizeExpr(), 6686 DSAT->getSizeModifier(), 6687 DSAT->getIndexTypeCVRQualifiers(), 6688 DSAT->getBracketsRange())); 6689 6690 const auto *VAT = cast<VariableArrayType>(ATy); 6691 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6692 VAT->getSizeExpr(), 6693 VAT->getSizeModifier(), 6694 VAT->getIndexTypeCVRQualifiers(), 6695 VAT->getBracketsRange())); 6696 } 6697 6698 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6699 if (T->isArrayType() || T->isFunctionType()) 6700 return getDecayedType(T); 6701 return T; 6702 } 6703 6704 QualType ASTContext::getSignatureParameterType(QualType T) const { 6705 T = getVariableArrayDecayedType(T); 6706 T = getAdjustedParameterType(T); 6707 return T.getUnqualifiedType(); 6708 } 6709 6710 QualType ASTContext::getExceptionObjectType(QualType T) const { 6711 // C++ [except.throw]p3: 6712 // A throw-expression initializes a temporary object, called the exception 6713 // object, the type of which is determined by removing any top-level 6714 // cv-qualifiers from the static type of the operand of throw and adjusting 6715 // the type from "array of T" or "function returning T" to "pointer to T" 6716 // or "pointer to function returning T", [...] 6717 T = getVariableArrayDecayedType(T); 6718 if (T->isArrayType() || T->isFunctionType()) 6719 T = getDecayedType(T); 6720 return T.getUnqualifiedType(); 6721 } 6722 6723 /// getArrayDecayedType - Return the properly qualified result of decaying the 6724 /// specified array type to a pointer. This operation is non-trivial when 6725 /// handling typedefs etc. The canonical type of "T" must be an array type, 6726 /// this returns a pointer to a properly qualified element of the array. 6727 /// 6728 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6729 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6730 // Get the element type with 'getAsArrayType' so that we don't lose any 6731 // typedefs in the element type of the array. This also handles propagation 6732 // of type qualifiers from the array type into the element type if present 6733 // (C99 6.7.3p8). 
6734 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6735 assert(PrettyArrayType && "Not an array type!"); 6736 6737 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6738 6739 // int x[restrict 4] -> int *restrict 6740 QualType Result = getQualifiedType(PtrTy, 6741 PrettyArrayType->getIndexTypeQualifiers()); 6742 6743 // int x[_Nullable] -> int * _Nullable 6744 if (auto Nullability = Ty->getNullability(*this)) { 6745 Result = const_cast<ASTContext *>(this)->getAttributedType( 6746 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6747 } 6748 return Result; 6749 } 6750 6751 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6752 return getBaseElementType(array->getElementType()); 6753 } 6754 6755 QualType ASTContext::getBaseElementType(QualType type) const { 6756 Qualifiers qs; 6757 while (true) { 6758 SplitQualType split = type.getSplitDesugaredType(); 6759 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6760 if (!array) break; 6761 6762 type = array->getElementType(); 6763 qs.addConsistentQualifiers(split.Quals); 6764 } 6765 6766 return getQualifiedType(type, qs); 6767 } 6768 6769 /// getConstantArrayElementCount - Returns number of constant array elements. 6770 uint64_t 6771 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6772 uint64_t ElementCount = 1; 6773 do { 6774 ElementCount *= CA->getSize().getZExtValue(); 6775 CA = dyn_cast_or_null<ConstantArrayType>( 6776 CA->getElementType()->getAsArrayTypeUnsafe()); 6777 } while (CA); 6778 return ElementCount; 6779 } 6780 6781 /// getFloatingRank - Return a relative rank for floating point types. 6782 /// This routine will assert if passed a built-in type that isn't a float. 6783 static FloatingRank getFloatingRank(QualType T) { 6784 if (const auto *CT = T->getAs<ComplexType>()) 6785 return getFloatingRank(CT->getElementType()); 6786 6787 switch (T->castAs<BuiltinType>()->getKind()) { 6788 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6789 case BuiltinType::Float16: return Float16Rank; 6790 case BuiltinType::Half: return HalfRank; 6791 case BuiltinType::Float: return FloatRank; 6792 case BuiltinType::Double: return DoubleRank; 6793 case BuiltinType::LongDouble: return LongDoubleRank; 6794 case BuiltinType::Float128: return Float128Rank; 6795 case BuiltinType::BFloat16: return BFloat16Rank; 6796 case BuiltinType::Ibm128: return Ibm128Rank; 6797 } 6798 } 6799 6800 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating 6801 /// point or a complex type (based on typeDomain/typeSize). 6802 /// 'typeDomain' is a real floating point or complex type. 6803 /// 'typeSize' is a real floating point or complex type. 
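/// For example (illustrative): a 'float' typeSize within a '_Complex double'
/// typeDomain yields '_Complex float'; the same typeSize within a plain
/// 'double' typeDomain yields 'float'.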
6804 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, 6805 QualType Domain) const { 6806 FloatingRank EltRank = getFloatingRank(Size); 6807 if (Domain->isComplexType()) { 6808 switch (EltRank) { 6809 case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported"); 6810 case Float16Rank: 6811 case HalfRank: llvm_unreachable("Complex half is not supported"); 6812 case Ibm128Rank: return getComplexType(Ibm128Ty); 6813 case FloatRank: return getComplexType(FloatTy); 6814 case DoubleRank: return getComplexType(DoubleTy); 6815 case LongDoubleRank: return getComplexType(LongDoubleTy); 6816 case Float128Rank: return getComplexType(Float128Ty); 6817 } 6818 } 6819 6820 assert(Domain->isRealFloatingType() && "Unknown domain!"); 6821 switch (EltRank) { 6822 case Float16Rank: return HalfTy; 6823 case BFloat16Rank: return BFloat16Ty; 6824 case HalfRank: return HalfTy; 6825 case FloatRank: return FloatTy; 6826 case DoubleRank: return DoubleTy; 6827 case LongDoubleRank: return LongDoubleTy; 6828 case Float128Rank: return Float128Ty; 6829 case Ibm128Rank: 6830 return Ibm128Ty; 6831 } 6832 llvm_unreachable("getFloatingRank(): illegal value for rank"); 6833 } 6834 6835 /// getFloatingTypeOrder - Compare the rank of the two specified floating 6836 /// point types, ignoring the domain of the type (i.e. 'double' == 6837 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 6838 /// LHS < RHS, return -1. 6839 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 6840 FloatingRank LHSR = getFloatingRank(LHS); 6841 FloatingRank RHSR = getFloatingRank(RHS); 6842 6843 if (LHSR == RHSR) 6844 return 0; 6845 if (LHSR > RHSR) 6846 return 1; 6847 return -1; 6848 } 6849 6850 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 6851 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 6852 return 0; 6853 return getFloatingTypeOrder(LHS, RHS); 6854 } 6855 6856 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 6857 /// routine will assert if passed a built-in type that isn't an integer or enum, 6858 /// or if it is not canonicalized. 6859 unsigned ASTContext::getIntegerRank(const Type *T) const { 6860 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 6861 6862 // Results in this 'losing' to any type of the same size, but winning if 6863 // larger. 6864 if (const auto *EIT = dyn_cast<BitIntType>(T)) 6865 return 0 + (EIT->getNumBits() << 3); 6866 6867 switch (cast<BuiltinType>(T)->getKind()) { 6868 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 6869 case BuiltinType::Bool: 6870 return 1 + (getIntWidth(BoolTy) << 3); 6871 case BuiltinType::Char_S: 6872 case BuiltinType::Char_U: 6873 case BuiltinType::SChar: 6874 case BuiltinType::UChar: 6875 return 2 + (getIntWidth(CharTy) << 3); 6876 case BuiltinType::Short: 6877 case BuiltinType::UShort: 6878 return 3 + (getIntWidth(ShortTy) << 3); 6879 case BuiltinType::Int: 6880 case BuiltinType::UInt: 6881 return 4 + (getIntWidth(IntTy) << 3); 6882 case BuiltinType::Long: 6883 case BuiltinType::ULong: 6884 return 5 + (getIntWidth(LongTy) << 3); 6885 case BuiltinType::LongLong: 6886 case BuiltinType::ULongLong: 6887 return 6 + (getIntWidth(LongLongTy) << 3); 6888 case BuiltinType::Int128: 6889 case BuiltinType::UInt128: 6890 return 7 + (getIntWidth(Int128Ty) << 3); 6891 } 6892 } 6893 6894 /// Whether this is a promotable bitfield reference according 6895 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 
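/// For example (illustrative): on a target with a 32-bit int, a reference to
/// the 20-bit member of 'struct S { unsigned int B : 20; };' promotes to
/// 'int', since all of its values are representable in int.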
6896 /// 6897 /// \returns the type this bit-field will promote to, or NULL if no 6898 /// promotion occurs. 6899 QualType ASTContext::isPromotableBitField(Expr *E) const { 6900 if (E->isTypeDependent() || E->isValueDependent()) 6901 return {}; 6902 6903 // C++ [conv.prom]p5: 6904 // If the bit-field has an enumerated type, it is treated as any other 6905 // value of that type for promotion purposes. 6906 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 6907 return {}; 6908 6909 // FIXME: We should not do this unless E->refersToBitField() is true. This 6910 // matters in C where getSourceBitField() will find bit-fields for various 6911 // cases where the source expression is not a bit-field designator. 6912 6913 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 6914 if (!Field) 6915 return {}; 6916 6917 QualType FT = Field->getType(); 6918 6919 uint64_t BitWidth = Field->getBitWidthValue(*this); 6920 uint64_t IntSize = getTypeSize(IntTy); 6921 // C++ [conv.prom]p5: 6922 // A prvalue for an integral bit-field can be converted to a prvalue of type 6923 // int if int can represent all the values of the bit-field; otherwise, it 6924 // can be converted to unsigned int if unsigned int can represent all the 6925 // values of the bit-field. If the bit-field is larger yet, no integral 6926 // promotion applies to it. 6927 // C11 6.3.1.1/2: 6928 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 6929 // If an int can represent all values of the original type (as restricted by 6930 // the width, for a bit-field), the value is converted to an int; otherwise, 6931 // it is converted to an unsigned int. 6932 // 6933 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 6934 // We perform that promotion here to match GCC and C++. 6935 // FIXME: C does not permit promotion of an enum bit-field whose rank is 6936 // greater than that of 'int'. We perform that promotion to match GCC. 6937 if (BitWidth < IntSize) 6938 return IntTy; 6939 6940 if (BitWidth == IntSize) 6941 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 6942 6943 // Bit-fields wider than int are not subject to promotions, and therefore act 6944 // like the base type. GCC has some weird bugs in this area that we 6945 // deliberately do not follow (GCC follows a pre-standard resolution to 6946 // C's DR315 which treats bit-width as being part of the type, and this leaks 6947 // into their semantics in some cases). 6948 return {}; 6949 } 6950 6951 /// getPromotedIntegerType - Returns the type that Promotable will 6952 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 6953 /// integer type. 6954 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 6955 assert(!Promotable.isNull()); 6956 assert(Promotable->isPromotableIntegerType()); 6957 if (const auto *ET = Promotable->getAs<EnumType>()) 6958 return ET->getDecl()->getPromotionType(); 6959 6960 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 6961 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 6962 // (3.9.1) can be converted to a prvalue of the first of the following 6963 // types that can represent all the values of its underlying type: 6964 // int, unsigned int, long int, unsigned long int, long long int, or 6965 // unsigned long long int [...] 6966 // FIXME: Is there some better way to compute this? 
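// For example (illustrative): with a 16-bit char16_t and a 32-bit int,
// char16_t promotes to 'int'; a 32-bit unsigned wchar_t promotes to
// 'unsigned int', the first listed type of equal size and matching
// signedness.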
6967 if (BT->getKind() == BuiltinType::WChar_S ||
6968 BT->getKind() == BuiltinType::WChar_U ||
6969 BT->getKind() == BuiltinType::Char8 ||
6970 BT->getKind() == BuiltinType::Char16 ||
6971 BT->getKind() == BuiltinType::Char32) {
6972 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
6973 uint64_t FromSize = getTypeSize(BT);
6974 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
6975 LongLongTy, UnsignedLongLongTy };
6976 for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
6977 uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
6978 if (FromSize < ToSize ||
6979 (FromSize == ToSize &&
6980 FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
6981 return PromoteTypes[Idx];
6982 }
6983 llvm_unreachable("char type should fit into long long");
6984 }
6985 }
6986
6987 // At this point, we should have a signed or unsigned integer type.
6988 if (Promotable->isSignedIntegerType())
6989 return IntTy;
6990 uint64_t PromotableSize = getIntWidth(Promotable);
6991 uint64_t IntSize = getIntWidth(IntTy);
6992 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
6993 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
6994 }
6995
6996 /// Recurses in pointer/array types until it finds an objc retainable
6997 /// type and returns its ownership.
6998 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
6999 while (!T.isNull()) {
7000 if (T.getObjCLifetime() != Qualifiers::OCL_None)
7001 return T.getObjCLifetime();
7002 if (T->isArrayType())
7003 T = getBaseElementType(T);
7004 else if (const auto *PT = T->getAs<PointerType>())
7005 T = PT->getPointeeType();
7006 else if (const auto *RT = T->getAs<ReferenceType>())
7007 T = RT->getPointeeType();
7008 else
7009 break;
7010 }
7011
7012 return Qualifiers::OCL_None;
7013 }
7014
7015 static const Type *getIntegerTypeForEnum(const EnumType *ET) {
7016 // Incomplete enum types are not treated as integer types.
7017 // FIXME: In C++, enum types are never integer types.
7018 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
7019 return ET->getDecl()->getIntegerType().getTypePtr();
7020 return nullptr;
7021 }
7022
7023 /// getIntegerTypeOrder - Returns the highest ranked integer type:
7024 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
7025 /// LHS < RHS, return -1.
7026 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
7027 const Type *LHSC = getCanonicalType(LHS).getTypePtr();
7028 const Type *RHSC = getCanonicalType(RHS).getTypePtr();
7029
7030 // Unwrap enums to their underlying type.
7031 if (const auto *ET = dyn_cast<EnumType>(LHSC))
7032 LHSC = getIntegerTypeForEnum(ET);
7033 if (const auto *ET = dyn_cast<EnumType>(RHSC))
7034 RHSC = getIntegerTypeForEnum(ET);
7035
7036 if (LHSC == RHSC) return 0;
7037
7038 bool LHSUnsigned = LHSC->isUnsignedIntegerType();
7039 bool RHSUnsigned = RHSC->isUnsignedIntegerType();
7040
7041 unsigned LHSRank = getIntegerRank(LHSC);
7042 unsigned RHSRank = getIntegerRank(RHSC);
7043
7044 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
7045 if (LHSRank == RHSRank) return 0;
7046 return LHSRank > RHSRank ? 1 : -1;
7047 }
7048
7049 // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
7050 if (LHSUnsigned) {
7051 // If the unsigned [LHS] type is larger, return it.
7052 if (LHSRank >= RHSRank)
7053 return 1;
7054
7055 // If the signed type can represent all values of the unsigned type, it
7056 // wins.
Because we are dealing with 2's complement and types that are
7057 // powers of two larger than each other, this is always safe.
7058 return -1;
7059 }
7060
7061 // If the unsigned [RHS] type is larger, return it.
7062 if (RHSRank >= LHSRank)
7063 return -1;
7064
7065 // If the signed type can represent all values of the unsigned type, it
7066 // wins. Because we are dealing with 2's complement and types that are
7067 // powers of two larger than each other, this is always safe.
7068 return 1;
7069 }
7070
7071 TypedefDecl *ASTContext::getCFConstantStringDecl() const {
7072 if (CFConstantStringTypeDecl)
7073 return CFConstantStringTypeDecl;
7074
7075 assert(!CFConstantStringTagDecl &&
7076 "tag and typedef should be initialized together");
7077 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
7078 CFConstantStringTagDecl->startDefinition();
7079
7080 struct {
7081 QualType Type;
7082 const char *Name;
7083 } Fields[5];
7084 unsigned Count = 0;
7085
7086 /// Objective-C ABI
7087 ///
7088 /// typedef struct __NSConstantString_tag {
7089 /// const int *isa;
7090 /// int flags;
7091 /// const char *str;
7092 /// long length;
7093 /// } __NSConstantString;
7094 ///
7095 /// Swift ABI (4.1, 4.2)
7096 ///
7097 /// typedef struct __NSConstantString_tag {
7098 /// uintptr_t _cfisa;
7099 /// uintptr_t _swift_rc;
7100 /// _Atomic(uint64_t) _cfinfoa;
7101 /// const char *_ptr;
7102 /// uint32_t _length;
7103 /// } __NSConstantString;
7104 ///
7105 /// Swift ABI (5.0)
7106 ///
7107 /// typedef struct __NSConstantString_tag {
7108 /// uintptr_t _cfisa;
7109 /// uintptr_t _swift_rc;
7110 /// _Atomic(uint64_t) _cfinfoa;
7111 /// const char *_ptr;
7112 /// uintptr_t _length;
7113 /// } __NSConstantString;
7114
7115 const auto CFRuntime = getLangOpts().CFRuntime;
7116 if (static_cast<unsigned>(CFRuntime) <
7117 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
7118 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
7119 Fields[Count++] = { IntTy, "flags" };
7120 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
7121 Fields[Count++] = { LongTy, "length" };
7122 } else {
7123 Fields[Count++] = { getUIntPtrType(), "_cfisa" };
7124 Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
7125 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" };
7126 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
7127 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
7128 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
7129 Fields[Count++] = { IntTy, "_length" };
7130 else
7131 Fields[Count++] = { getUIntPtrType(), "_length" };
7132 }
7133
7134 // Create fields
7135 for (unsigned i = 0; i < Count; ++i) {
7136 FieldDecl *Field =
7137 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
7138 SourceLocation(), &Idents.get(Fields[i].Name),
7139 Fields[i].Type, /*TInfo=*/nullptr,
7140 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
7141 Field->setAccess(AS_public);
7142 CFConstantStringTagDecl->addDecl(Field);
7143 }
7144
7145 CFConstantStringTagDecl->completeDefinition();
7146 // This type is designed to be compatible with NSConstantString, but cannot
7147 // use the same name, since NSConstantString is an interface.
7148 auto tagType = getTagDeclType(CFConstantStringTagDecl); 7149 CFConstantStringTypeDecl = 7150 buildImplicitTypedef(tagType, "__NSConstantString"); 7151 7152 return CFConstantStringTypeDecl; 7153 } 7154 7155 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 7156 if (!CFConstantStringTagDecl) 7157 getCFConstantStringDecl(); // Build the tag and the typedef. 7158 return CFConstantStringTagDecl; 7159 } 7160 7161 // getCFConstantStringType - Return the type used for constant CFStrings. 7162 QualType ASTContext::getCFConstantStringType() const { 7163 return getTypedefType(getCFConstantStringDecl()); 7164 } 7165 7166 QualType ASTContext::getObjCSuperType() const { 7167 if (ObjCSuperType.isNull()) { 7168 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7169 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7170 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7171 } 7172 return ObjCSuperType; 7173 } 7174 7175 void ASTContext::setCFConstantStringType(QualType T) { 7176 const auto *TD = T->castAs<TypedefType>(); 7177 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7178 const auto *TagType = 7179 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7180 CFConstantStringTagDecl = TagType->getDecl(); 7181 } 7182 7183 QualType ASTContext::getBlockDescriptorType() const { 7184 if (BlockDescriptorType) 7185 return getTagDeclType(BlockDescriptorType); 7186 7187 RecordDecl *RD; 7188 // FIXME: Needs the FlagAppleBlock bit. 7189 RD = buildImplicitRecord("__block_descriptor"); 7190 RD->startDefinition(); 7191 7192 QualType FieldTypes[] = { 7193 UnsignedLongTy, 7194 UnsignedLongTy, 7195 }; 7196 7197 static const char *const FieldNames[] = { 7198 "reserved", 7199 "Size" 7200 }; 7201 7202 for (size_t i = 0; i < 2; ++i) { 7203 FieldDecl *Field = FieldDecl::Create( 7204 *this, RD, SourceLocation(), SourceLocation(), 7205 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7206 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7207 Field->setAccess(AS_public); 7208 RD->addDecl(Field); 7209 } 7210 7211 RD->completeDefinition(); 7212 7213 BlockDescriptorType = RD; 7214 7215 return getTagDeclType(BlockDescriptorType); 7216 } 7217 7218 QualType ASTContext::getBlockDescriptorExtendedType() const { 7219 if (BlockDescriptorExtendedType) 7220 return getTagDeclType(BlockDescriptorExtendedType); 7221 7222 RecordDecl *RD; 7223 // FIXME: Needs the FlagAppleBlock bit. 
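// The record built below is roughly equivalent to (illustrative):
//   struct __block_descriptor_withcopydispose {
//     unsigned long reserved;
//     unsigned long Size;
//     void **CopyFuncPtr;
//     void **DestroyFuncPtr;
//   };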
7224 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7225 RD->startDefinition(); 7226 7227 QualType FieldTypes[] = { 7228 UnsignedLongTy, 7229 UnsignedLongTy, 7230 getPointerType(VoidPtrTy), 7231 getPointerType(VoidPtrTy) 7232 }; 7233 7234 static const char *const FieldNames[] = { 7235 "reserved", 7236 "Size", 7237 "CopyFuncPtr", 7238 "DestroyFuncPtr" 7239 }; 7240 7241 for (size_t i = 0; i < 4; ++i) { 7242 FieldDecl *Field = FieldDecl::Create( 7243 *this, RD, SourceLocation(), SourceLocation(), 7244 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7245 /*BitWidth=*/nullptr, 7246 /*Mutable=*/false, ICIS_NoInit); 7247 Field->setAccess(AS_public); 7248 RD->addDecl(Field); 7249 } 7250 7251 RD->completeDefinition(); 7252 7253 BlockDescriptorExtendedType = RD; 7254 return getTagDeclType(BlockDescriptorExtendedType); 7255 } 7256 7257 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7258 const auto *BT = dyn_cast<BuiltinType>(T); 7259 7260 if (!BT) { 7261 if (isa<PipeType>(T)) 7262 return OCLTK_Pipe; 7263 7264 return OCLTK_Default; 7265 } 7266 7267 switch (BT->getKind()) { 7268 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7269 case BuiltinType::Id: \ 7270 return OCLTK_Image; 7271 #include "clang/Basic/OpenCLImageTypes.def" 7272 7273 case BuiltinType::OCLClkEvent: 7274 return OCLTK_ClkEvent; 7275 7276 case BuiltinType::OCLEvent: 7277 return OCLTK_Event; 7278 7279 case BuiltinType::OCLQueue: 7280 return OCLTK_Queue; 7281 7282 case BuiltinType::OCLReserveID: 7283 return OCLTK_ReserveID; 7284 7285 case BuiltinType::OCLSampler: 7286 return OCLTK_Sampler; 7287 7288 default: 7289 return OCLTK_Default; 7290 } 7291 } 7292 7293 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7294 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7295 } 7296 7297 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7298 /// requires copy/dispose. Note that this must match the logic 7299 /// in buildByrefHelpers. 7300 bool ASTContext::BlockRequiresCopying(QualType Ty, 7301 const VarDecl *D) { 7302 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7303 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7304 if (!copyExpr && record->hasTrivialDestructor()) return false; 7305 7306 return true; 7307 } 7308 7309 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7310 // move or destroy. 7311 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7312 return true; 7313 7314 if (!Ty->isObjCRetainableType()) return false; 7315 7316 Qualifiers qs = Ty.getQualifiers(); 7317 7318 // If we have lifetime, that dominates. 7319 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7320 switch (lifetime) { 7321 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7322 7323 // These are just bits as far as the runtime is concerned. 7324 case Qualifiers::OCL_ExplicitNone: 7325 case Qualifiers::OCL_Autoreleasing: 7326 return false; 7327 7328 // These cases should have been taken care of when checking the type's 7329 // non-triviality. 
7330 case Qualifiers::OCL_Weak: 7331 case Qualifiers::OCL_Strong: 7332 llvm_unreachable("impossible"); 7333 } 7334 llvm_unreachable("fell out of lifetime switch!"); 7335 } 7336 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7337 Ty->isObjCObjectPointerType()); 7338 } 7339 7340 bool ASTContext::getByrefLifetime(QualType Ty, 7341 Qualifiers::ObjCLifetime &LifeTime, 7342 bool &HasByrefExtendedLayout) const { 7343 if (!getLangOpts().ObjC || 7344 getLangOpts().getGC() != LangOptions::NonGC) 7345 return false; 7346 7347 HasByrefExtendedLayout = false; 7348 if (Ty->isRecordType()) { 7349 HasByrefExtendedLayout = true; 7350 LifeTime = Qualifiers::OCL_None; 7351 } else if ((LifeTime = Ty.getObjCLifetime())) { 7352 // Honor the ARC qualifiers. 7353 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7354 // The MRR rule. 7355 LifeTime = Qualifiers::OCL_ExplicitNone; 7356 } else { 7357 LifeTime = Qualifiers::OCL_None; 7358 } 7359 return true; 7360 } 7361 7362 CanQualType ASTContext::getNSUIntegerType() const { 7363 assert(Target && "Expected target to be initialized"); 7364 const llvm::Triple &T = Target->getTriple(); 7365 // Windows is LLP64 rather than LP64 7366 if (T.isOSWindows() && T.isArch64Bit()) 7367 return UnsignedLongLongTy; 7368 return UnsignedLongTy; 7369 } 7370 7371 CanQualType ASTContext::getNSIntegerType() const { 7372 assert(Target && "Expected target to be initialized"); 7373 const llvm::Triple &T = Target->getTriple(); 7374 // Windows is LLP64 rather than LP64 7375 if (T.isOSWindows() && T.isArch64Bit()) 7376 return LongLongTy; 7377 return LongTy; 7378 } 7379 7380 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7381 if (!ObjCInstanceTypeDecl) 7382 ObjCInstanceTypeDecl = 7383 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7384 return ObjCInstanceTypeDecl; 7385 } 7386 7387 // This returns true if a type has been typedefed to BOOL: 7388 // typedef <type> BOOL; 7389 static bool isTypeTypedefedAsBOOL(QualType T) { 7390 if (const auto *TT = dyn_cast<TypedefType>(T)) 7391 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7392 return II->isStr("BOOL"); 7393 7394 return false; 7395 } 7396 7397 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7398 /// purpose. 7399 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7400 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7401 return CharUnits::Zero(); 7402 7403 CharUnits sz = getTypeSizeInChars(type); 7404 7405 // Make all integer and enum types at least as large as an int 7406 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7407 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7408 // Treat arrays as pointers, since that's how they're passed in. 7409 else if (type->isArrayType()) 7410 sz = getTypeSizeInChars(VoidPtrTy); 7411 return sz; 7412 } 7413 7414 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7415 return getTargetInfo().getCXXABI().isMicrosoft() && 7416 VD->isStaticDataMember() && 7417 VD->getType()->isIntegralOrEnumerationType() && 7418 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7419 } 7420 7421 ASTContext::InlineVariableDefinitionKind 7422 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7423 if (!VD->isInline()) 7424 return InlineVariableDefinitionKind::None; 7425 7426 // In almost all cases, it's a weak definition. 
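// For example (illustrative): 'inline int a;' at namespace scope is a weak
// definition, whereas a constexpr static data member that is redeclared out
// of line at file scope without 'inline' is treated as a strong definition
// by the loop below.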
7427 auto *First = VD->getFirstDecl(); 7428 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7429 return InlineVariableDefinitionKind::Weak; 7430 7431 // If there's a file-context declaration in this translation unit, it's a 7432 // non-discardable definition. 7433 for (auto *D : VD->redecls()) 7434 if (D->getLexicalDeclContext()->isFileContext() && 7435 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7436 return InlineVariableDefinitionKind::Strong; 7437 7438 // If we've not seen one yet, we don't know. 7439 return InlineVariableDefinitionKind::WeakUnknown; 7440 } 7441 7442 static std::string charUnitsToString(const CharUnits &CU) { 7443 return llvm::itostr(CU.getQuantity()); 7444 } 7445 7446 /// getObjCEncodingForBlock - Return the encoded type for this block 7447 /// declaration. 7448 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7449 std::string S; 7450 7451 const BlockDecl *Decl = Expr->getBlockDecl(); 7452 QualType BlockTy = 7453 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7454 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7455 // Encode result type. 7456 if (getLangOpts().EncodeExtendedBlockSig) 7457 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7458 true /*Extended*/); 7459 else 7460 getObjCEncodingForType(BlockReturnTy, S); 7461 // Compute size of all parameters. 7462 // Start with computing size of a pointer in number of bytes. 7463 // FIXME: There might(should) be a better way of doing this computation! 7464 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7465 CharUnits ParmOffset = PtrSize; 7466 for (auto PI : Decl->parameters()) { 7467 QualType PType = PI->getType(); 7468 CharUnits sz = getObjCEncodingTypeSize(PType); 7469 if (sz.isZero()) 7470 continue; 7471 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7472 ParmOffset += sz; 7473 } 7474 // Size of the argument frame 7475 S += charUnitsToString(ParmOffset); 7476 // Block pointer and offset. 7477 S += "@?0"; 7478 7479 // Argument types. 7480 ParmOffset = PtrSize; 7481 for (auto PVDecl : Decl->parameters()) { 7482 QualType PType = PVDecl->getOriginalType(); 7483 if (const auto *AT = 7484 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7485 // Use array's original type only if it has known number of 7486 // elements. 7487 if (!isa<ConstantArrayType>(AT)) 7488 PType = PVDecl->getType(); 7489 } else if (PType->isFunctionType()) 7490 PType = PVDecl->getType(); 7491 if (getLangOpts().EncodeExtendedBlockSig) 7492 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7493 S, true /*Extended*/); 7494 else 7495 getObjCEncodingForType(PType, S); 7496 S += charUnitsToString(ParmOffset); 7497 ParmOffset += getObjCEncodingTypeSize(PType); 7498 } 7499 7500 return S; 7501 } 7502 7503 std::string 7504 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7505 std::string S; 7506 // Encode result type. 7507 getObjCEncodingForType(Decl->getReturnType(), S); 7508 CharUnits ParmOffset; 7509 // Compute size of all parameters. 7510 for (auto PI : Decl->parameters()) { 7511 QualType PType = PI->getType(); 7512 CharUnits sz = getObjCEncodingTypeSize(PType); 7513 if (sz.isZero()) 7514 continue; 7515 7516 assert(sz.isPositive() && 7517 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7518 ParmOffset += sz; 7519 } 7520 S += charUnitsToString(ParmOffset); 7521 ParmOffset = CharUnits::Zero(); 7522 7523 // Argument types. 
7524 for (auto PVDecl : Decl->parameters()) { 7525 QualType PType = PVDecl->getOriginalType(); 7526 if (const auto *AT = 7527 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7528 // Use array's original type only if it has known number of 7529 // elements. 7530 if (!isa<ConstantArrayType>(AT)) 7531 PType = PVDecl->getType(); 7532 } else if (PType->isFunctionType()) 7533 PType = PVDecl->getType(); 7534 getObjCEncodingForType(PType, S); 7535 S += charUnitsToString(ParmOffset); 7536 ParmOffset += getObjCEncodingTypeSize(PType); 7537 } 7538 7539 return S; 7540 } 7541 7542 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7543 /// method parameter or return type. If Extended, include class names and 7544 /// block object types. 7545 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7546 QualType T, std::string& S, 7547 bool Extended) const { 7548 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7549 getObjCEncodingForTypeQualifier(QT, S); 7550 // Encode parameter type. 7551 ObjCEncOptions Options = ObjCEncOptions() 7552 .setExpandPointedToStructures() 7553 .setExpandStructures() 7554 .setIsOutermostType(); 7555 if (Extended) 7556 Options.setEncodeBlockParameters().setEncodeClassNames(); 7557 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7558 } 7559 7560 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7561 /// declaration. 7562 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7563 bool Extended) const { 7564 // FIXME: This is not very efficient. 7565 // Encode return type. 7566 std::string S; 7567 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7568 Decl->getReturnType(), S, Extended); 7569 // Compute size of all parameters. 7570 // Start with computing size of a pointer in number of bytes. 7571 // FIXME: There might(should) be a better way of doing this computation! 7572 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7573 // The first two arguments (self and _cmd) are pointers; account for 7574 // their size. 7575 CharUnits ParmOffset = 2 * PtrSize; 7576 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7577 E = Decl->sel_param_end(); PI != E; ++PI) { 7578 QualType PType = (*PI)->getType(); 7579 CharUnits sz = getObjCEncodingTypeSize(PType); 7580 if (sz.isZero()) 7581 continue; 7582 7583 assert(sz.isPositive() && 7584 "getObjCEncodingForMethodDecl - Incomplete param type"); 7585 ParmOffset += sz; 7586 } 7587 S += charUnitsToString(ParmOffset); 7588 S += "@0:"; 7589 S += charUnitsToString(PtrSize); 7590 7591 // Argument types. 7592 ParmOffset = 2 * PtrSize; 7593 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7594 E = Decl->sel_param_end(); PI != E; ++PI) { 7595 const ParmVarDecl *PVDecl = *PI; 7596 QualType PType = PVDecl->getOriginalType(); 7597 if (const auto *AT = 7598 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7599 // Use array's original type only if it has known number of 7600 // elements. 
7601 if (!isa<ConstantArrayType>(AT)) 7602 PType = PVDecl->getType(); 7603 } else if (PType->isFunctionType()) 7604 PType = PVDecl->getType(); 7605 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7606 PType, S, Extended); 7607 S += charUnitsToString(ParmOffset); 7608 ParmOffset += getObjCEncodingTypeSize(PType); 7609 } 7610 7611 return S; 7612 } 7613 7614 ObjCPropertyImplDecl * 7615 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7616 const ObjCPropertyDecl *PD, 7617 const Decl *Container) const { 7618 if (!Container) 7619 return nullptr; 7620 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7621 for (auto *PID : CID->property_impls()) 7622 if (PID->getPropertyDecl() == PD) 7623 return PID; 7624 } else { 7625 const auto *OID = cast<ObjCImplementationDecl>(Container); 7626 for (auto *PID : OID->property_impls()) 7627 if (PID->getPropertyDecl() == PD) 7628 return PID; 7629 } 7630 return nullptr; 7631 } 7632 7633 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7634 /// property declaration. If non-NULL, Container must be either an 7635 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7636 /// NULL when getting encodings for protocol properties. 7637 /// Property attributes are stored as a comma-delimited C string. The simple 7638 /// attributes readonly and bycopy are encoded as single characters. The 7639 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7640 /// encoded as single characters, followed by an identifier. Property types 7641 /// are also encoded as a parametrized attribute. The characters used to encode 7642 /// these attributes are defined by the following enumeration: 7643 /// @code 7644 /// enum PropertyAttributes { 7645 /// kPropertyReadOnly = 'R', // property is read-only. 7646 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7647 /// kPropertyByref = '&', // property is a reference to the value last assigned 7648 /// kPropertyDynamic = 'D', // property is dynamic 7649 /// kPropertyGetter = 'G', // followed by getter selector name 7650 /// kPropertySetter = 'S', // followed by setter selector name 7651 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7652 /// kPropertyType = 'T' // followed by old-style type encoding. 7653 /// kPropertyWeak = 'W' // 'weak' property 7654 /// kPropertyStrong = 'P' // property GC'able 7655 /// kPropertyNonAtomic = 'N' // property non-atomic 7656 /// }; 7657 /// @endcode 7658 std::string 7659 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7660 const Decl *Container) const { 7661 // Collect information from the property implementation decl(s). 7662 bool Dynamic = false; 7663 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7664 7665 if (ObjCPropertyImplDecl *PropertyImpDecl = 7666 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7667 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7668 Dynamic = true; 7669 else 7670 SynthesizePID = PropertyImpDecl; 7671 } 7672 7673 // FIXME: This is not very efficient. 7674 std::string S = "T"; 7675 7676 // Encode result type. 7677 // GCC has some special rules regarding encoding of properties which 7678 // closely resembles encoding of ivars. 
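// For example (illustrative): '@property (nonatomic, copy) NSString *name;'
// synthesized with ivar '_name' typically yields "T@"NSString",C,N,V_name".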
7679 getObjCEncodingForPropertyType(PD->getType(), S); 7680 7681 if (PD->isReadOnly()) { 7682 S += ",R"; 7683 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7684 S += ",C"; 7685 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7686 S += ",&"; 7687 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7688 S += ",W"; 7689 } else { 7690 switch (PD->getSetterKind()) { 7691 case ObjCPropertyDecl::Assign: break; 7692 case ObjCPropertyDecl::Copy: S += ",C"; break; 7693 case ObjCPropertyDecl::Retain: S += ",&"; break; 7694 case ObjCPropertyDecl::Weak: S += ",W"; break; 7695 } 7696 } 7697 7698 // It really isn't clear at all what this means, since properties 7699 // are "dynamic by default". 7700 if (Dynamic) 7701 S += ",D"; 7702 7703 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7704 S += ",N"; 7705 7706 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7707 S += ",G"; 7708 S += PD->getGetterName().getAsString(); 7709 } 7710 7711 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7712 S += ",S"; 7713 S += PD->getSetterName().getAsString(); 7714 } 7715 7716 if (SynthesizePID) { 7717 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7718 S += ",V"; 7719 S += OID->getNameAsString(); 7720 } 7721 7722 // FIXME: OBJCGC: weak & strong 7723 return S; 7724 } 7725 7726 /// getLegacyIntegralTypeEncoding - 7727 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7728 /// 'l' or 'L' , but not always. For typedefs, we need to use 7729 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7730 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7731 if (isa<TypedefType>(PointeeTy.getTypePtr())) { 7732 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7733 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7734 PointeeTy = UnsignedIntTy; 7735 else 7736 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7737 PointeeTy = IntTy; 7738 } 7739 } 7740 } 7741 7742 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7743 const FieldDecl *Field, 7744 QualType *NotEncodedT) const { 7745 // We follow the behavior of gcc, expanding structures which are 7746 // directly pointed to, and expanding embedded structures. Note that 7747 // these rules are sufficient to prevent recursive encoding of the 7748 // same type. 7749 getObjCEncodingForTypeImpl(T, S, 7750 ObjCEncOptions() 7751 .setExpandPointedToStructures() 7752 .setExpandStructures() 7753 .setIsOutermostType(), 7754 Field, NotEncodedT); 7755 } 7756 7757 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7758 std::string& S) const { 7759 // Encode result type. 7760 // GCC has some special rules regarding encoding of properties which 7761 // closely resembles encoding of ivars. 
7762 getObjCEncodingForTypeImpl(T, S, 7763 ObjCEncOptions() 7764 .setExpandPointedToStructures() 7765 .setExpandStructures() 7766 .setIsOutermostType() 7767 .setEncodingProperty(), 7768 /*Field=*/nullptr); 7769 } 7770 7771 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7772 const BuiltinType *BT) { 7773 BuiltinType::Kind kind = BT->getKind(); 7774 switch (kind) { 7775 case BuiltinType::Void: return 'v'; 7776 case BuiltinType::Bool: return 'B'; 7777 case BuiltinType::Char8: 7778 case BuiltinType::Char_U: 7779 case BuiltinType::UChar: return 'C'; 7780 case BuiltinType::Char16: 7781 case BuiltinType::UShort: return 'S'; 7782 case BuiltinType::Char32: 7783 case BuiltinType::UInt: return 'I'; 7784 case BuiltinType::ULong: 7785 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7786 case BuiltinType::UInt128: return 'T'; 7787 case BuiltinType::ULongLong: return 'Q'; 7788 case BuiltinType::Char_S: 7789 case BuiltinType::SChar: return 'c'; 7790 case BuiltinType::Short: return 's'; 7791 case BuiltinType::WChar_S: 7792 case BuiltinType::WChar_U: 7793 case BuiltinType::Int: return 'i'; 7794 case BuiltinType::Long: 7795 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7796 case BuiltinType::LongLong: return 'q'; 7797 case BuiltinType::Int128: return 't'; 7798 case BuiltinType::Float: return 'f'; 7799 case BuiltinType::Double: return 'd'; 7800 case BuiltinType::LongDouble: return 'D'; 7801 case BuiltinType::NullPtr: return '*'; // like char* 7802 7803 case BuiltinType::BFloat16: 7804 case BuiltinType::Float16: 7805 case BuiltinType::Float128: 7806 case BuiltinType::Ibm128: 7807 case BuiltinType::Half: 7808 case BuiltinType::ShortAccum: 7809 case BuiltinType::Accum: 7810 case BuiltinType::LongAccum: 7811 case BuiltinType::UShortAccum: 7812 case BuiltinType::UAccum: 7813 case BuiltinType::ULongAccum: 7814 case BuiltinType::ShortFract: 7815 case BuiltinType::Fract: 7816 case BuiltinType::LongFract: 7817 case BuiltinType::UShortFract: 7818 case BuiltinType::UFract: 7819 case BuiltinType::ULongFract: 7820 case BuiltinType::SatShortAccum: 7821 case BuiltinType::SatAccum: 7822 case BuiltinType::SatLongAccum: 7823 case BuiltinType::SatUShortAccum: 7824 case BuiltinType::SatUAccum: 7825 case BuiltinType::SatULongAccum: 7826 case BuiltinType::SatShortFract: 7827 case BuiltinType::SatFract: 7828 case BuiltinType::SatLongFract: 7829 case BuiltinType::SatUShortFract: 7830 case BuiltinType::SatUFract: 7831 case BuiltinType::SatULongFract: 7832 // FIXME: potentially need @encodes for these! 7833 return ' '; 7834 7835 #define SVE_TYPE(Name, Id, SingletonId) \ 7836 case BuiltinType::Id: 7837 #include "clang/Basic/AArch64SVEACLETypes.def" 7838 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 7839 #include "clang/Basic/RISCVVTypes.def" 7840 { 7841 DiagnosticsEngine &Diags = C->getDiagnostics(); 7842 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 7843 "cannot yet @encode type %0"); 7844 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 7845 return ' '; 7846 } 7847 7848 case BuiltinType::ObjCId: 7849 case BuiltinType::ObjCClass: 7850 case BuiltinType::ObjCSel: 7851 llvm_unreachable("@encoding ObjC primitive type"); 7852 7853 // OpenCL and placeholder types don't need @encodings. 
7854 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7855 case BuiltinType::Id: 7856 #include "clang/Basic/OpenCLImageTypes.def" 7857 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 7858 case BuiltinType::Id: 7859 #include "clang/Basic/OpenCLExtensionTypes.def" 7860 case BuiltinType::OCLEvent: 7861 case BuiltinType::OCLClkEvent: 7862 case BuiltinType::OCLQueue: 7863 case BuiltinType::OCLReserveID: 7864 case BuiltinType::OCLSampler: 7865 case BuiltinType::Dependent: 7866 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 7867 case BuiltinType::Id: 7868 #include "clang/Basic/PPCTypes.def" 7869 #define BUILTIN_TYPE(KIND, ID) 7870 #define PLACEHOLDER_TYPE(KIND, ID) \ 7871 case BuiltinType::KIND: 7872 #include "clang/AST/BuiltinTypes.def" 7873 llvm_unreachable("invalid builtin type for @encode"); 7874 } 7875 llvm_unreachable("invalid BuiltinType::Kind value"); 7876 } 7877 7878 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 7879 EnumDecl *Enum = ET->getDecl(); 7880 7881 // The encoding of an non-fixed enum type is always 'i', regardless of size. 7882 if (!Enum->isFixed()) 7883 return 'i'; 7884 7885 // The encoding of a fixed enum type matches its fixed underlying type. 7886 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 7887 return getObjCEncodingForPrimitiveType(C, BT); 7888 } 7889 7890 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 7891 QualType T, const FieldDecl *FD) { 7892 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 7893 S += 'b'; 7894 // The NeXT runtime encodes bit fields as b followed by the number of bits. 7895 // The GNU runtime requires more information; bitfields are encoded as b, 7896 // then the offset (in bits) of the first element, then the type of the 7897 // bitfield, then the size in bits. For example, in this structure: 7898 // 7899 // struct 7900 // { 7901 // int integer; 7902 // int flags:2; 7903 // }; 7904 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 7905 // runtime, but b32i2 for the GNU runtime. The reason for this extra 7906 // information is not especially sensible, but we're stuck with it for 7907 // compatibility with GCC, although providing it breaks anything that 7908 // actually uses runtime introspection and wants to work on both runtimes... 7909 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 7910 uint64_t Offset; 7911 7912 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 7913 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 7914 IVD); 7915 } else { 7916 const RecordDecl *RD = FD->getParent(); 7917 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 7918 Offset = RL.getFieldOffset(FD->getFieldIndex()); 7919 } 7920 7921 S += llvm::utostr(Offset); 7922 7923 if (const auto *ET = T->getAs<EnumType>()) 7924 S += ObjCEncodingForEnumType(Ctx, ET); 7925 else { 7926 const auto *BT = T->castAs<BuiltinType>(); 7927 S += getObjCEncodingForPrimitiveType(Ctx, BT); 7928 } 7929 } 7930 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 7931 } 7932 7933 // Helper function for determining whether the encoded type string would include 7934 // a template specialization type. 
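// For example (illustrative), a class that is itself a template
// specialization (such as std::vector<int>), or one whose bases or fields
// involve such a specialization, makes this return true; the caller then
// emits "^v" for pointers to that class instead of expanding the
// specialization, unless LangOpts.EncodeCXXClassTemplateSpec is enabled.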
7935 static bool hasTemplateSpecializationInEncodedString(const Type *T, 7936 bool VisitBasesAndFields) { 7937 T = T->getBaseElementTypeUnsafe(); 7938 7939 if (auto *PT = T->getAs<PointerType>()) 7940 return hasTemplateSpecializationInEncodedString( 7941 PT->getPointeeType().getTypePtr(), false); 7942 7943 auto *CXXRD = T->getAsCXXRecordDecl(); 7944 7945 if (!CXXRD) 7946 return false; 7947 7948 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 7949 return true; 7950 7951 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 7952 return false; 7953 7954 for (auto B : CXXRD->bases()) 7955 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 7956 true)) 7957 return true; 7958 7959 for (auto *FD : CXXRD->fields()) 7960 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 7961 true)) 7962 return true; 7963 7964 return false; 7965 } 7966 7967 // FIXME: Use SmallString for accumulating string. 7968 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 7969 const ObjCEncOptions Options, 7970 const FieldDecl *FD, 7971 QualType *NotEncodedT) const { 7972 CanQualType CT = getCanonicalType(T); 7973 switch (CT->getTypeClass()) { 7974 case Type::Builtin: 7975 case Type::Enum: 7976 if (FD && FD->isBitField()) 7977 return EncodeBitField(this, S, T, FD); 7978 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 7979 S += getObjCEncodingForPrimitiveType(this, BT); 7980 else 7981 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 7982 return; 7983 7984 case Type::Complex: 7985 S += 'j'; 7986 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 7987 ObjCEncOptions(), 7988 /*Field=*/nullptr); 7989 return; 7990 7991 case Type::Atomic: 7992 S += 'A'; 7993 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 7994 ObjCEncOptions(), 7995 /*Field=*/nullptr); 7996 return; 7997 7998 // encoding for pointer or reference types. 7999 case Type::Pointer: 8000 case Type::LValueReference: 8001 case Type::RValueReference: { 8002 QualType PointeeTy; 8003 if (isa<PointerType>(CT)) { 8004 const auto *PT = T->castAs<PointerType>(); 8005 if (PT->isObjCSelType()) { 8006 S += ':'; 8007 return; 8008 } 8009 PointeeTy = PT->getPointeeType(); 8010 } else { 8011 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8012 } 8013 8014 bool isReadOnly = false; 8015 // For historical/compatibility reasons, the read-only qualifier of the 8016 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8017 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8018 // Also, do not emit the 'r' for anything but the outermost type! 8019 if (isa<TypedefType>(T.getTypePtr())) { 8020 if (Options.IsOutermostType() && T.isConstQualified()) { 8021 isReadOnly = true; 8022 S += 'r'; 8023 } 8024 } else if (Options.IsOutermostType()) { 8025 QualType P = PointeeTy; 8026 while (auto PT = P->getAs<PointerType>()) 8027 P = PT->getPointeeType(); 8028 if (P.isConstQualified()) { 8029 isReadOnly = true; 8030 S += 'r'; 8031 } 8032 } 8033 if (isReadOnly) { 8034 // Another legacy compatibility encoding. Some ObjC qualifier and type 8035 // combinations need to be rearranged. 8036 // Rewrite "in const" from "nr" to "rn" 8037 if (StringRef(S).endswith("nr")) 8038 S.replace(S.end()-2, S.end(), "rn"); 8039 } 8040 8041 if (PointeeTy->isCharType()) { 8042 // char pointer types should be encoded as '*' unless it is a 8043 // type that has been typedef'd to 'BOOL'. 
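      // For example (illustrative), a plain 'char *' is emitted simply as '*',
      // while a pointer to a type typedef'd as 'BOOL' falls through to the
      // expanded '^' form (e.g. '^c' when BOOL is signed char) so that BOOL *
      // is not collapsed to '*'.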
8044 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8045 S += '*'; 8046 return; 8047 } 8048 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8049 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8050 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8051 S += '#'; 8052 return; 8053 } 8054 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8055 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8056 S += '@'; 8057 return; 8058 } 8059 // If the encoded string for the class includes template names, just emit 8060 // "^v" for pointers to the class. 8061 if (getLangOpts().CPlusPlus && 8062 (!getLangOpts().EncodeCXXClassTemplateSpec && 8063 hasTemplateSpecializationInEncodedString( 8064 RTy, Options.ExpandPointedToStructures()))) { 8065 S += "^v"; 8066 return; 8067 } 8068 // fall through... 8069 } 8070 S += '^'; 8071 getLegacyIntegralTypeEncoding(PointeeTy); 8072 8073 ObjCEncOptions NewOptions; 8074 if (Options.ExpandPointedToStructures()) 8075 NewOptions.setExpandStructures(); 8076 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8077 /*Field=*/nullptr, NotEncodedT); 8078 return; 8079 } 8080 8081 case Type::ConstantArray: 8082 case Type::IncompleteArray: 8083 case Type::VariableArray: { 8084 const auto *AT = cast<ArrayType>(CT); 8085 8086 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8087 // Incomplete arrays are encoded as a pointer to the array element. 8088 S += '^'; 8089 8090 getObjCEncodingForTypeImpl( 8091 AT->getElementType(), S, 8092 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8093 } else { 8094 S += '['; 8095 8096 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8097 S += llvm::utostr(CAT->getSize().getZExtValue()); 8098 else { 8099 //Variable length arrays are encoded as a regular array with 0 elements. 8100 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8101 "Unknown array type!"); 8102 S += '0'; 8103 } 8104 8105 getObjCEncodingForTypeImpl( 8106 AT->getElementType(), S, 8107 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8108 NotEncodedT); 8109 S += ']'; 8110 } 8111 return; 8112 } 8113 8114 case Type::FunctionNoProto: 8115 case Type::FunctionProto: 8116 S += '?'; 8117 return; 8118 8119 case Type::Record: { 8120 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8121 S += RDecl->isUnion() ? '(' : '{'; 8122 // Anonymous structures print as '?' 8123 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8124 S += II->getName(); 8125 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8126 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8127 llvm::raw_string_ostream OS(S); 8128 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8129 getPrintingPolicy()); 8130 } 8131 } else { 8132 S += '?'; 8133 } 8134 if (Options.ExpandStructures()) { 8135 S += '='; 8136 if (!RDecl->isUnion()) { 8137 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8138 } else { 8139 for (const auto *Field : RDecl->fields()) { 8140 if (FD) { 8141 S += '"'; 8142 S += Field->getNameAsString(); 8143 S += '"'; 8144 } 8145 8146 // Special case bit-fields. 
8147 if (Field->isBitField()) { 8148 getObjCEncodingForTypeImpl(Field->getType(), S, 8149 ObjCEncOptions().setExpandStructures(), 8150 Field); 8151 } else { 8152 QualType qt = Field->getType(); 8153 getLegacyIntegralTypeEncoding(qt); 8154 getObjCEncodingForTypeImpl( 8155 qt, S, 8156 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8157 NotEncodedT); 8158 } 8159 } 8160 } 8161 } 8162 S += RDecl->isUnion() ? ')' : '}'; 8163 return; 8164 } 8165 8166 case Type::BlockPointer: { 8167 const auto *BT = T->castAs<BlockPointerType>(); 8168 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8169 if (Options.EncodeBlockParameters()) { 8170 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8171 8172 S += '<'; 8173 // Block return type 8174 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8175 Options.forComponentType(), FD, NotEncodedT); 8176 // Block self 8177 S += "@?"; 8178 // Block parameters 8179 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8180 for (const auto &I : FPT->param_types()) 8181 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8182 NotEncodedT); 8183 } 8184 S += '>'; 8185 } 8186 return; 8187 } 8188 8189 case Type::ObjCObject: { 8190 // hack to match legacy encoding of *id and *Class 8191 QualType Ty = getObjCObjectPointerType(CT); 8192 if (Ty->isObjCIdType()) { 8193 S += "{objc_object=}"; 8194 return; 8195 } 8196 else if (Ty->isObjCClassType()) { 8197 S += "{objc_class=}"; 8198 return; 8199 } 8200 // TODO: Double check to make sure this intentionally falls through. 8201 LLVM_FALLTHROUGH; 8202 } 8203 8204 case Type::ObjCInterface: { 8205 // Ignore protocol qualifiers when mangling at this level. 8206 // @encode(class_name) 8207 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8208 S += '{'; 8209 S += OI->getObjCRuntimeNameAsString(); 8210 if (Options.ExpandStructures()) { 8211 S += '='; 8212 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8213 DeepCollectObjCIvars(OI, true, Ivars); 8214 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8215 const FieldDecl *Field = Ivars[i]; 8216 if (Field->isBitField()) 8217 getObjCEncodingForTypeImpl(Field->getType(), S, 8218 ObjCEncOptions().setExpandStructures(), 8219 Field); 8220 else 8221 getObjCEncodingForTypeImpl(Field->getType(), S, 8222 ObjCEncOptions().setExpandStructures(), FD, 8223 NotEncodedT); 8224 } 8225 } 8226 S += '}'; 8227 return; 8228 } 8229 8230 case Type::ObjCObjectPointer: { 8231 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8232 if (OPT->isObjCIdType()) { 8233 S += '@'; 8234 return; 8235 } 8236 8237 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8238 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8239 // Since this is a binary compatibility issue, need to consult with 8240 // runtime folks. Fortunately, this is a *very* obscure construct. 8241 S += '#'; 8242 return; 8243 } 8244 8245 if (OPT->isObjCQualifiedIdType()) { 8246 getObjCEncodingForTypeImpl( 8247 getObjCIdType(), S, 8248 Options.keepingOnly(ObjCEncOptions() 8249 .setExpandPointedToStructures() 8250 .setExpandStructures()), 8251 FD); 8252 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8253 // Note that we do extended encoding of protocol qualifier list 8254 // Only when doing ivar or property encoding. 
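      // For example (illustrative), an ivar declared as id<NSCopying> is
      // encoded here as @"<NSCopying>", whereas a plain @encode(id<NSCopying>)
      // stays as just '@'.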
8255 S += '"'; 8256 for (const auto *I : OPT->quals()) { 8257 S += '<'; 8258 S += I->getObjCRuntimeNameAsString(); 8259 S += '>'; 8260 } 8261 S += '"'; 8262 } 8263 return; 8264 } 8265 8266 S += '@'; 8267 if (OPT->getInterfaceDecl() && 8268 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8269 S += '"'; 8270 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8271 for (const auto *I : OPT->quals()) { 8272 S += '<'; 8273 S += I->getObjCRuntimeNameAsString(); 8274 S += '>'; 8275 } 8276 S += '"'; 8277 } 8278 return; 8279 } 8280 8281 // gcc just blithely ignores member pointers. 8282 // FIXME: we should do better than that. 'M' is available. 8283 case Type::MemberPointer: 8284 // This matches gcc's encoding, even though technically it is insufficient. 8285 //FIXME. We should do a better job than gcc. 8286 case Type::Vector: 8287 case Type::ExtVector: 8288 // Until we have a coherent encoding of these three types, issue warning. 8289 if (NotEncodedT) 8290 *NotEncodedT = T; 8291 return; 8292 8293 case Type::ConstantMatrix: 8294 if (NotEncodedT) 8295 *NotEncodedT = T; 8296 return; 8297 8298 case Type::BitInt: 8299 if (NotEncodedT) 8300 *NotEncodedT = T; 8301 return; 8302 8303 // We could see an undeduced auto type here during error recovery. 8304 // Just ignore it. 8305 case Type::Auto: 8306 case Type::DeducedTemplateSpecialization: 8307 return; 8308 8309 case Type::Pipe: 8310 #define ABSTRACT_TYPE(KIND, BASE) 8311 #define TYPE(KIND, BASE) 8312 #define DEPENDENT_TYPE(KIND, BASE) \ 8313 case Type::KIND: 8314 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8315 case Type::KIND: 8316 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8317 case Type::KIND: 8318 #include "clang/AST/TypeNodes.inc" 8319 llvm_unreachable("@encode for dependent type!"); 8320 } 8321 llvm_unreachable("bad type kind!"); 8322 } 8323 8324 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8325 std::string &S, 8326 const FieldDecl *FD, 8327 bool includeVBases, 8328 QualType *NotEncodedT) const { 8329 assert(RDecl && "Expected non-null RecordDecl"); 8330 assert(!RDecl->isUnion() && "Should not be called for unions"); 8331 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8332 return; 8333 8334 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8335 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8336 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8337 8338 if (CXXRec) { 8339 for (const auto &BI : CXXRec->bases()) { 8340 if (!BI.isVirtual()) { 8341 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8342 if (base->isEmpty()) 8343 continue; 8344 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8345 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8346 std::make_pair(offs, base)); 8347 } 8348 } 8349 } 8350 8351 unsigned i = 0; 8352 for (FieldDecl *Field : RDecl->fields()) { 8353 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8354 continue; 8355 uint64_t offs = layout.getFieldOffset(i); 8356 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8357 std::make_pair(offs, Field)); 8358 ++i; 8359 } 8360 8361 if (CXXRec && includeVBases) { 8362 for (const auto &BI : CXXRec->vbases()) { 8363 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8364 if (base->isEmpty()) 8365 continue; 8366 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8367 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8368 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8369 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 8370 std::make_pair(offs, base)); 8371 } 8372 } 8373 8374 CharUnits size; 8375 if (CXXRec) { 8376 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 8377 } else { 8378 size = layout.getSize(); 8379 } 8380 8381 #ifndef NDEBUG 8382 uint64_t CurOffs = 0; 8383 #endif 8384 std::multimap<uint64_t, NamedDecl *>::iterator 8385 CurLayObj = FieldOrBaseOffsets.begin(); 8386 8387 if (CXXRec && CXXRec->isDynamicClass() && 8388 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8389 if (FD) { 8390 S += "\"_vptr$"; 8391 std::string recname = CXXRec->getNameAsString(); 8392 if (recname.empty()) recname = "?"; 8393 S += recname; 8394 S += '"'; 8395 } 8396 S += "^^?"; 8397 #ifndef NDEBUG 8398 CurOffs += getTypeSize(VoidPtrTy); 8399 #endif 8400 } 8401 8402 if (!RDecl->hasFlexibleArrayMember()) { 8403 // Mark the end of the structure. 8404 uint64_t offs = toBits(size); 8405 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8406 std::make_pair(offs, nullptr)); 8407 } 8408 8409 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8410 #ifndef NDEBUG 8411 assert(CurOffs <= CurLayObj->first); 8412 if (CurOffs < CurLayObj->first) { 8413 uint64_t padding = CurLayObj->first - CurOffs; 8414 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8415 // packing/alignment of members is different than normal, in which case 8416 // the encoding will be out-of-sync with the real layout. 8417 // If the runtime switches to just consider the size of types without 8418 // taking into account alignment, we could make padding explicit in the 8419 // encoding (e.g. using arrays of chars). The encoding strings would be 8420 // longer then, though. 8421 CurOffs += padding; 8422 } 8423 #endif 8424 8425 NamedDecl *dcl = CurLayObj->second; 8426 if (!dcl) 8427 break; // reached end of structure. 8428 8429 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8430 // We expand the bases without their virtual bases since those are going 8431 // in the initial structure. Note that this differs from gcc which 8432 // expands virtual bases each time one is encountered in the hierarchy, 8433 // making the encoding type bigger than it really is.
8434 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8435 NotEncodedT); 8436 assert(!base->isEmpty()); 8437 #ifndef NDEBUG 8438 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8439 #endif 8440 } else { 8441 const auto *field = cast<FieldDecl>(dcl); 8442 if (FD) { 8443 S += '"'; 8444 S += field->getNameAsString(); 8445 S += '"'; 8446 } 8447 8448 if (field->isBitField()) { 8449 EncodeBitField(this, S, field->getType(), field); 8450 #ifndef NDEBUG 8451 CurOffs += field->getBitWidthValue(*this); 8452 #endif 8453 } else { 8454 QualType qt = field->getType(); 8455 getLegacyIntegralTypeEncoding(qt); 8456 getObjCEncodingForTypeImpl( 8457 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8458 FD, NotEncodedT); 8459 #ifndef NDEBUG 8460 CurOffs += getTypeSize(field->getType()); 8461 #endif 8462 } 8463 } 8464 } 8465 } 8466 8467 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8468 std::string& S) const { 8469 if (QT & Decl::OBJC_TQ_In) 8470 S += 'n'; 8471 if (QT & Decl::OBJC_TQ_Inout) 8472 S += 'N'; 8473 if (QT & Decl::OBJC_TQ_Out) 8474 S += 'o'; 8475 if (QT & Decl::OBJC_TQ_Bycopy) 8476 S += 'O'; 8477 if (QT & Decl::OBJC_TQ_Byref) 8478 S += 'R'; 8479 if (QT & Decl::OBJC_TQ_Oneway) 8480 S += 'V'; 8481 } 8482 8483 TypedefDecl *ASTContext::getObjCIdDecl() const { 8484 if (!ObjCIdDecl) { 8485 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8486 T = getObjCObjectPointerType(T); 8487 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8488 } 8489 return ObjCIdDecl; 8490 } 8491 8492 TypedefDecl *ASTContext::getObjCSelDecl() const { 8493 if (!ObjCSelDecl) { 8494 QualType T = getPointerType(ObjCBuiltinSelTy); 8495 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8496 } 8497 return ObjCSelDecl; 8498 } 8499 8500 TypedefDecl *ASTContext::getObjCClassDecl() const { 8501 if (!ObjCClassDecl) { 8502 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8503 T = getObjCObjectPointerType(T); 8504 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8505 } 8506 return ObjCClassDecl; 8507 } 8508 8509 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8510 if (!ObjCProtocolClassDecl) { 8511 ObjCProtocolClassDecl 8512 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8513 SourceLocation(), 8514 &Idents.get("Protocol"), 8515 /*typeParamList=*/nullptr, 8516 /*PrevDecl=*/nullptr, 8517 SourceLocation(), true); 8518 } 8519 8520 return ObjCProtocolClassDecl; 8521 } 8522 8523 //===----------------------------------------------------------------------===// 8524 // __builtin_va_list Construction Functions 8525 //===----------------------------------------------------------------------===// 8526 8527 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8528 StringRef Name) { 8529 // typedef char* __builtin[_ms]_va_list; 8530 QualType T = Context->getPointerType(Context->CharTy); 8531 return Context->buildImplicitTypedef(T, Name); 8532 } 8533 8534 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8535 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8536 } 8537 8538 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8539 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8540 } 8541 8542 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8543 // typedef void* __builtin_va_list; 8544 QualType T = Context->getPointerType(Context->VoidTy); 8545 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8546 } 8547 8548 static TypedefDecl * 8549 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8550 // struct __va_list 8551 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8552 if (Context->getLangOpts().CPlusPlus) { 8553 // namespace std { struct __va_list { 8554 auto *NS = NamespaceDecl::Create( 8555 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8556 /*Inline*/ false, SourceLocation(), SourceLocation(), 8557 &Context->Idents.get("std"), 8558 /*PrevDecl*/ nullptr); 8559 NS->setImplicit(); 8560 VaListTagDecl->setDeclContext(NS); 8561 } 8562 8563 VaListTagDecl->startDefinition(); 8564 8565 const size_t NumFields = 5; 8566 QualType FieldTypes[NumFields]; 8567 const char *FieldNames[NumFields]; 8568 8569 // void *__stack; 8570 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8571 FieldNames[0] = "__stack"; 8572 8573 // void *__gr_top; 8574 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8575 FieldNames[1] = "__gr_top"; 8576 8577 // void *__vr_top; 8578 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8579 FieldNames[2] = "__vr_top"; 8580 8581 // int __gr_offs; 8582 FieldTypes[3] = Context->IntTy; 8583 FieldNames[3] = "__gr_offs"; 8584 8585 // int __vr_offs; 8586 FieldTypes[4] = Context->IntTy; 8587 FieldNames[4] = "__vr_offs"; 8588 8589 // Create fields 8590 for (unsigned i = 0; i < NumFields; ++i) { 8591 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8592 VaListTagDecl, 8593 SourceLocation(), 8594 SourceLocation(), 8595 &Context->Idents.get(FieldNames[i]), 8596 FieldTypes[i], /*TInfo=*/nullptr, 8597 /*BitWidth=*/nullptr, 8598 /*Mutable=*/false, 8599 ICIS_NoInit); 8600 Field->setAccess(AS_public); 8601 VaListTagDecl->addDecl(Field); 8602 } 8603 VaListTagDecl->completeDefinition(); 8604 Context->VaListTagDecl = VaListTagDecl; 8605 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8606 8607 // } __builtin_va_list; 8608 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8609 } 8610 8611 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8612 // typedef struct __va_list_tag { 8613 RecordDecl *VaListTagDecl; 8614 8615 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8616 VaListTagDecl->startDefinition(); 8617 8618 const size_t NumFields = 5; 8619 QualType FieldTypes[NumFields]; 8620 const char *FieldNames[NumFields]; 8621 8622 // unsigned char gpr; 8623 FieldTypes[0] = Context->UnsignedCharTy; 8624 FieldNames[0] = "gpr"; 8625 8626 // unsigned char fpr; 8627 FieldTypes[1] = Context->UnsignedCharTy; 8628 FieldNames[1] = "fpr"; 8629 8630 // unsigned short reserved; 8631 FieldTypes[2] = Context->UnsignedShortTy; 8632 FieldNames[2] = "reserved"; 8633 8634 // void* overflow_arg_area; 8635 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8636 FieldNames[3] = "overflow_arg_area"; 8637 8638 // void* reg_save_area; 8639 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8640 FieldNames[4] = "reg_save_area"; 8641 8642 // Create fields 8643 for (unsigned i = 0; i < NumFields; ++i) { 8644 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8645 SourceLocation(), 8646 SourceLocation(), 8647 &Context->Idents.get(FieldNames[i]), 8648 FieldTypes[i], /*TInfo=*/nullptr, 8649 /*BitWidth=*/nullptr, 8650 /*Mutable=*/false, 8651 ICIS_NoInit); 8652 Field->setAccess(AS_public); 8653 VaListTagDecl->addDecl(Field); 8654 } 8655 VaListTagDecl->completeDefinition(); 8656 
Context->VaListTagDecl = VaListTagDecl; 8657 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8658 8659 // } __va_list_tag; 8660 TypedefDecl *VaListTagTypedefDecl = 8661 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8662 8663 QualType VaListTagTypedefType = 8664 Context->getTypedefType(VaListTagTypedefDecl); 8665 8666 // typedef __va_list_tag __builtin_va_list[1]; 8667 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8668 QualType VaListTagArrayType 8669 = Context->getConstantArrayType(VaListTagTypedefType, 8670 Size, nullptr, ArrayType::Normal, 0); 8671 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8672 } 8673 8674 static TypedefDecl * 8675 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8676 // struct __va_list_tag { 8677 RecordDecl *VaListTagDecl; 8678 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8679 VaListTagDecl->startDefinition(); 8680 8681 const size_t NumFields = 4; 8682 QualType FieldTypes[NumFields]; 8683 const char *FieldNames[NumFields]; 8684 8685 // unsigned gp_offset; 8686 FieldTypes[0] = Context->UnsignedIntTy; 8687 FieldNames[0] = "gp_offset"; 8688 8689 // unsigned fp_offset; 8690 FieldTypes[1] = Context->UnsignedIntTy; 8691 FieldNames[1] = "fp_offset"; 8692 8693 // void* overflow_arg_area; 8694 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8695 FieldNames[2] = "overflow_arg_area"; 8696 8697 // void* reg_save_area; 8698 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8699 FieldNames[3] = "reg_save_area"; 8700 8701 // Create fields 8702 for (unsigned i = 0; i < NumFields; ++i) { 8703 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8704 VaListTagDecl, 8705 SourceLocation(), 8706 SourceLocation(), 8707 &Context->Idents.get(FieldNames[i]), 8708 FieldTypes[i], /*TInfo=*/nullptr, 8709 /*BitWidth=*/nullptr, 8710 /*Mutable=*/false, 8711 ICIS_NoInit); 8712 Field->setAccess(AS_public); 8713 VaListTagDecl->addDecl(Field); 8714 } 8715 VaListTagDecl->completeDefinition(); 8716 Context->VaListTagDecl = VaListTagDecl; 8717 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8718 8719 // }; 8720 8721 // typedef struct __va_list_tag __builtin_va_list[1]; 8722 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8723 QualType VaListTagArrayType = Context->getConstantArrayType( 8724 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8725 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8726 } 8727 8728 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8729 // typedef int __builtin_va_list[4]; 8730 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8731 QualType IntArrayType = Context->getConstantArrayType( 8732 Context->IntTy, Size, nullptr, ArrayType::Normal, 0); 8733 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8734 } 8735 8736 static TypedefDecl * 8737 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8738 // struct __va_list 8739 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8740 if (Context->getLangOpts().CPlusPlus) { 8741 // namespace std { struct __va_list { 8742 NamespaceDecl *NS; 8743 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8744 Context->getTranslationUnitDecl(), 8745 /*Inline*/false, SourceLocation(), 8746 SourceLocation(), &Context->Idents.get("std"), 8747 /*PrevDecl*/ nullptr); 8748 NS->setImplicit(); 8749 
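    // Parent the record in namespace std so that, in C++, the AAPCS va_list
    // type is std::__va_list (as the Arm C++ ABI expects) and mangles
    // accordingly; the record itself ends up equivalent to
    //   struct __va_list { void *__ap; };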
VaListDecl->setDeclContext(NS); 8750 } 8751 8752 VaListDecl->startDefinition(); 8753 8754 // void * __ap; 8755 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8756 VaListDecl, 8757 SourceLocation(), 8758 SourceLocation(), 8759 &Context->Idents.get("__ap"), 8760 Context->getPointerType(Context->VoidTy), 8761 /*TInfo=*/nullptr, 8762 /*BitWidth=*/nullptr, 8763 /*Mutable=*/false, 8764 ICIS_NoInit); 8765 Field->setAccess(AS_public); 8766 VaListDecl->addDecl(Field); 8767 8768 // }; 8769 VaListDecl->completeDefinition(); 8770 Context->VaListTagDecl = VaListDecl; 8771 8772 // typedef struct __va_list __builtin_va_list; 8773 QualType T = Context->getRecordType(VaListDecl); 8774 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8775 } 8776 8777 static TypedefDecl * 8778 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8779 // struct __va_list_tag { 8780 RecordDecl *VaListTagDecl; 8781 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8782 VaListTagDecl->startDefinition(); 8783 8784 const size_t NumFields = 4; 8785 QualType FieldTypes[NumFields]; 8786 const char *FieldNames[NumFields]; 8787 8788 // long __gpr; 8789 FieldTypes[0] = Context->LongTy; 8790 FieldNames[0] = "__gpr"; 8791 8792 // long __fpr; 8793 FieldTypes[1] = Context->LongTy; 8794 FieldNames[1] = "__fpr"; 8795 8796 // void *__overflow_arg_area; 8797 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8798 FieldNames[2] = "__overflow_arg_area"; 8799 8800 // void *__reg_save_area; 8801 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8802 FieldNames[3] = "__reg_save_area"; 8803 8804 // Create fields 8805 for (unsigned i = 0; i < NumFields; ++i) { 8806 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8807 VaListTagDecl, 8808 SourceLocation(), 8809 SourceLocation(), 8810 &Context->Idents.get(FieldNames[i]), 8811 FieldTypes[i], /*TInfo=*/nullptr, 8812 /*BitWidth=*/nullptr, 8813 /*Mutable=*/false, 8814 ICIS_NoInit); 8815 Field->setAccess(AS_public); 8816 VaListTagDecl->addDecl(Field); 8817 } 8818 VaListTagDecl->completeDefinition(); 8819 Context->VaListTagDecl = VaListTagDecl; 8820 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8821 8822 // }; 8823 8824 // typedef __va_list_tag __builtin_va_list[1]; 8825 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8826 QualType VaListTagArrayType = Context->getConstantArrayType( 8827 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8828 8829 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8830 } 8831 8832 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 8833 // typedef struct __va_list_tag { 8834 RecordDecl *VaListTagDecl; 8835 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8836 VaListTagDecl->startDefinition(); 8837 8838 const size_t NumFields = 3; 8839 QualType FieldTypes[NumFields]; 8840 const char *FieldNames[NumFields]; 8841 8842 // void *CurrentSavedRegisterArea; 8843 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8844 FieldNames[0] = "__current_saved_reg_area_pointer"; 8845 8846 // void *SavedRegAreaEnd; 8847 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8848 FieldNames[1] = "__saved_reg_area_end_pointer"; 8849 8850 // void *OverflowArea; 8851 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8852 FieldNames[2] = "__overflow_area_pointer"; 8853 8854 // Create fields 8855 for (unsigned i = 0; i < NumFields; ++i) { 8856 FieldDecl *Field = FieldDecl::Create( 
8857 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 8858 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 8859 /*TInfo=*/nullptr, 8860 /*BitWidth=*/nullptr, 8861 /*Mutable=*/false, ICIS_NoInit); 8862 Field->setAccess(AS_public); 8863 VaListTagDecl->addDecl(Field); 8864 } 8865 VaListTagDecl->completeDefinition(); 8866 Context->VaListTagDecl = VaListTagDecl; 8867 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8868 8869 // } __va_list_tag; 8870 TypedefDecl *VaListTagTypedefDecl = 8871 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8872 8873 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 8874 8875 // typedef __va_list_tag __builtin_va_list[1]; 8876 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8877 QualType VaListTagArrayType = Context->getConstantArrayType( 8878 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); 8879 8880 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8881 } 8882 8883 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 8884 TargetInfo::BuiltinVaListKind Kind) { 8885 switch (Kind) { 8886 case TargetInfo::CharPtrBuiltinVaList: 8887 return CreateCharPtrBuiltinVaListDecl(Context); 8888 case TargetInfo::VoidPtrBuiltinVaList: 8889 return CreateVoidPtrBuiltinVaListDecl(Context); 8890 case TargetInfo::AArch64ABIBuiltinVaList: 8891 return CreateAArch64ABIBuiltinVaListDecl(Context); 8892 case TargetInfo::PowerABIBuiltinVaList: 8893 return CreatePowerABIBuiltinVaListDecl(Context); 8894 case TargetInfo::X86_64ABIBuiltinVaList: 8895 return CreateX86_64ABIBuiltinVaListDecl(Context); 8896 case TargetInfo::PNaClABIBuiltinVaList: 8897 return CreatePNaClABIBuiltinVaListDecl(Context); 8898 case TargetInfo::AAPCSABIBuiltinVaList: 8899 return CreateAAPCSABIBuiltinVaListDecl(Context); 8900 case TargetInfo::SystemZBuiltinVaList: 8901 return CreateSystemZBuiltinVaListDecl(Context); 8902 case TargetInfo::HexagonBuiltinVaList: 8903 return CreateHexagonBuiltinVaListDecl(Context); 8904 } 8905 8906 llvm_unreachable("Unhandled __builtin_va_list type kind"); 8907 } 8908 8909 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 8910 if (!BuiltinVaListDecl) { 8911 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 8912 assert(BuiltinVaListDecl->isImplicit()); 8913 } 8914 8915 return BuiltinVaListDecl; 8916 } 8917 8918 Decl *ASTContext::getVaListTagDecl() const { 8919 // Force the creation of VaListTagDecl by building the __builtin_va_list 8920 // declaration. 8921 if (!VaListTagDecl) 8922 (void)getBuiltinVaListDecl(); 8923 8924 return VaListTagDecl; 8925 } 8926 8927 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 8928 if (!BuiltinMSVaListDecl) 8929 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 8930 8931 return BuiltinMSVaListDecl; 8932 } 8933 8934 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 8935 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 8936 } 8937 8938 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 8939 assert(ObjCConstantStringType.isNull() && 8940 "'NSConstantString' type already set!"); 8941 8942 ObjCConstantStringType = getObjCInterfaceType(Decl); 8943 } 8944 8945 /// Retrieve the template name that corresponds to a non-empty 8946 /// lookup. 
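/// For example, when an unqualified name used as a template finds several
/// function templates, the whole set is stored in a single
/// OverloadedTemplateStorage and wrapped in one TemplateName.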
8947 TemplateName 8948 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 8949 UnresolvedSetIterator End) const { 8950 unsigned size = End - Begin; 8951 assert(size > 1 && "set is not overloaded!"); 8952 8953 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 8954 size * sizeof(FunctionTemplateDecl*)); 8955 auto *OT = new (memory) OverloadedTemplateStorage(size); 8956 8957 NamedDecl **Storage = OT->getStorage(); 8958 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 8959 NamedDecl *D = *I; 8960 assert(isa<FunctionTemplateDecl>(D) || 8961 isa<UnresolvedUsingValueDecl>(D) || 8962 (isa<UsingShadowDecl>(D) && 8963 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 8964 *Storage++ = D; 8965 } 8966 8967 return TemplateName(OT); 8968 } 8969 8970 /// Retrieve a template name representing an unqualified-id that has been 8971 /// assumed to name a template for ADL purposes. 8972 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 8973 auto *OT = new (*this) AssumedTemplateStorage(Name); 8974 return TemplateName(OT); 8975 } 8976 8977 /// Retrieve the template name that represents a qualified 8978 /// template name such as \c std::vector. 8979 TemplateName 8980 ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 8981 bool TemplateKeyword, 8982 TemplateDecl *Template) const { 8983 assert(NNS && "Missing nested-name-specifier in qualified template name"); 8984 8985 // FIXME: Canonicalization? 8986 llvm::FoldingSetNodeID ID; 8987 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 8988 8989 void *InsertPos = nullptr; 8990 QualifiedTemplateName *QTN = 8991 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8992 if (!QTN) { 8993 QTN = new (*this, alignof(QualifiedTemplateName)) 8994 QualifiedTemplateName(NNS, TemplateKeyword, Template); 8995 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 8996 } 8997 8998 return TemplateName(QTN); 8999 } 9000 9001 /// Retrieve the template name that represents a dependent 9002 /// template name such as \c MetaFun::template apply. 9003 TemplateName 9004 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9005 const IdentifierInfo *Name) const { 9006 assert((!NNS || NNS->isDependent()) && 9007 "Nested name specifier must be dependent"); 9008 9009 llvm::FoldingSetNodeID ID; 9010 DependentTemplateName::Profile(ID, NNS, Name); 9011 9012 void *InsertPos = nullptr; 9013 DependentTemplateName *QTN = 9014 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9015 9016 if (QTN) 9017 return TemplateName(QTN); 9018 9019 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9020 if (CanonNNS == NNS) { 9021 QTN = new (*this, alignof(DependentTemplateName)) 9022 DependentTemplateName(NNS, Name); 9023 } else { 9024 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9025 QTN = new (*this, alignof(DependentTemplateName)) 9026 DependentTemplateName(NNS, Name, Canon); 9027 DependentTemplateName *CheckQTN = 9028 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9029 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9030 (void)CheckQTN; 9031 } 9032 9033 DependentTemplateNames.InsertNode(QTN, InsertPos); 9034 return TemplateName(QTN); 9035 } 9036 9037 /// Retrieve the template name that represents a dependent 9038 /// template name such as \c MetaFun::template operator+. 
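/// Like the identifier form above, the result is uniqued on the
/// (nested-name-specifier, operator) pair and is linked to a canonical name
/// built from the canonical nested-name-specifier.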
9039 TemplateName 9040 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9041 OverloadedOperatorKind Operator) const { 9042 assert((!NNS || NNS->isDependent()) && 9043 "Nested name specifier must be dependent"); 9044 9045 llvm::FoldingSetNodeID ID; 9046 DependentTemplateName::Profile(ID, NNS, Operator); 9047 9048 void *InsertPos = nullptr; 9049 DependentTemplateName *QTN 9050 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9051 9052 if (QTN) 9053 return TemplateName(QTN); 9054 9055 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9056 if (CanonNNS == NNS) { 9057 QTN = new (*this, alignof(DependentTemplateName)) 9058 DependentTemplateName(NNS, Operator); 9059 } else { 9060 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9061 QTN = new (*this, alignof(DependentTemplateName)) 9062 DependentTemplateName(NNS, Operator, Canon); 9063 9064 DependentTemplateName *CheckQTN 9065 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9066 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9067 (void)CheckQTN; 9068 } 9069 9070 DependentTemplateNames.InsertNode(QTN, InsertPos); 9071 return TemplateName(QTN); 9072 } 9073 9074 TemplateName 9075 ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, 9076 TemplateName replacement) const { 9077 llvm::FoldingSetNodeID ID; 9078 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); 9079 9080 void *insertPos = nullptr; 9081 SubstTemplateTemplateParmStorage *subst 9082 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9083 9084 if (!subst) { 9085 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); 9086 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9087 } 9088 9089 return TemplateName(subst); 9090 } 9091 9092 TemplateName 9093 ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, 9094 const TemplateArgument &ArgPack) const { 9095 auto &Self = const_cast<ASTContext &>(*this); 9096 llvm::FoldingSetNodeID ID; 9097 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); 9098 9099 void *InsertPos = nullptr; 9100 SubstTemplateTemplateParmPackStorage *Subst 9101 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 9102 9103 if (!Subst) { 9104 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, 9105 ArgPack.pack_size(), 9106 ArgPack.pack_begin()); 9107 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 9108 } 9109 9110 return TemplateName(Subst); 9111 } 9112 9113 /// getFromTargetType - Given one of the integer types provided by 9114 /// TargetInfo, produce the corresponding type. The unsigned @p Type 9115 /// is actually a value of type @c TargetInfo::IntType. 
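/// For example, TargetInfo::SignedLongLong maps to LongLongTy, and
/// TargetInfo::NoInt yields a null CanQualType.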
9116 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 9117 switch (Type) { 9118 case TargetInfo::NoInt: return {}; 9119 case TargetInfo::SignedChar: return SignedCharTy; 9120 case TargetInfo::UnsignedChar: return UnsignedCharTy; 9121 case TargetInfo::SignedShort: return ShortTy; 9122 case TargetInfo::UnsignedShort: return UnsignedShortTy; 9123 case TargetInfo::SignedInt: return IntTy; 9124 case TargetInfo::UnsignedInt: return UnsignedIntTy; 9125 case TargetInfo::SignedLong: return LongTy; 9126 case TargetInfo::UnsignedLong: return UnsignedLongTy; 9127 case TargetInfo::SignedLongLong: return LongLongTy; 9128 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 9129 } 9130 9131 llvm_unreachable("Unhandled TargetInfo::IntType value"); 9132 } 9133 9134 //===----------------------------------------------------------------------===// 9135 // Type Predicates. 9136 //===----------------------------------------------------------------------===// 9137 9138 /// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, the type's 9139 /// Objective-C garbage collection attribute. 9140 /// 9141 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 9142 if (getLangOpts().getGC() == LangOptions::NonGC) 9143 return Qualifiers::GCNone; 9144 9145 assert(getLangOpts().ObjC); 9146 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 9147 9148 // Default behaviour under Objective-C's GC is for ObjC pointers 9149 // (or pointers to them) to be treated as though they were declared 9150 // as __strong. 9151 if (GCAttrs == Qualifiers::GCNone) { 9152 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 9153 return Qualifiers::Strong; 9154 else if (Ty->isPointerType()) 9155 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 9156 } else { 9157 // It's not valid to set GC attributes on anything that isn't a 9158 // pointer. 9159 #ifndef NDEBUG 9160 QualType CT = Ty->getCanonicalTypeInternal(); 9161 while (const auto *AT = dyn_cast<ArrayType>(CT)) 9162 CT = AT->getElementType(); 9163 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 9164 #endif 9165 } 9166 return GCAttrs; 9167 } 9168 9169 //===----------------------------------------------------------------------===// 9170 // Type Compatibility Testing 9171 //===----------------------------------------------------------------------===// 9172 9173 /// areCompatVectorTypes - Return true if the two specified vector types are 9174 /// compatible. 9175 static bool areCompatVectorTypes(const VectorType *LHS, 9176 const VectorType *RHS) { 9177 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9178 return LHS->getElementType() == RHS->getElementType() && 9179 LHS->getNumElements() == RHS->getNumElements(); 9180 } 9181 9182 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9183 /// compatible.
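/// For example (illustrative), two matrices with the same element type and
/// __attribute__((matrix_type(3, 4))) dimensions are compatible, while a 3x4
/// and a 4x3 matrix of the same element type are not.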
9184 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9185 const ConstantMatrixType *RHS) { 9186 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9187 return LHS->getElementType() == RHS->getElementType() && 9188 LHS->getNumRows() == RHS->getNumRows() && 9189 LHS->getNumColumns() == RHS->getNumColumns(); 9190 } 9191 9192 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9193 QualType SecondVec) { 9194 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9195 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9196 9197 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9198 return true; 9199 9200 // Treat Neon vector types and most AltiVec vector types as if they are the 9201 // equivalent GCC vector types. 9202 const auto *First = FirstVec->castAs<VectorType>(); 9203 const auto *Second = SecondVec->castAs<VectorType>(); 9204 if (First->getNumElements() == Second->getNumElements() && 9205 hasSameType(First->getElementType(), Second->getElementType()) && 9206 First->getVectorKind() != VectorType::AltiVecPixel && 9207 First->getVectorKind() != VectorType::AltiVecBool && 9208 Second->getVectorKind() != VectorType::AltiVecPixel && 9209 Second->getVectorKind() != VectorType::AltiVecBool && 9210 First->getVectorKind() != VectorType::SveFixedLengthDataVector && 9211 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 9212 Second->getVectorKind() != VectorType::SveFixedLengthDataVector && 9213 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) 9214 return true; 9215 9216 return false; 9217 } 9218 9219 /// getSVETypeSize - Return SVE vector or predicate register size. 9220 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9221 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); 9222 return Ty->getKind() == BuiltinType::SveBool 9223 ? (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth() 9224 : Context.getLangOpts().VScaleMin * 128; 9225 } 9226 9227 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9228 QualType SecondType) { 9229 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 9230 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 9231 "Expected SVE builtin type and vector type!"); 9232 9233 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9234 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9235 if (const auto *VT = SecondType->getAs<VectorType>()) { 9236 // Predicates have the same representation as uint8 so we also have to 9237 // check the kind to make these types incompatible. 
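        // That is, a fixed-length SVE predicate vector is only considered
        // compatible with svbool_t, not with svuint8_t, even though both use
        // the same underlying element representation.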
9238 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 9239 return BT->getKind() == BuiltinType::SveBool; 9240 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 9241 return VT->getElementType().getCanonicalType() == 9242 FirstType->getSveEltType(*this); 9243 else if (VT->getVectorKind() == VectorType::GenericVector) 9244 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9245 hasSameType(VT->getElementType(), 9246 getBuiltinVectorTypeInfo(BT).ElementType); 9247 } 9248 } 9249 return false; 9250 }; 9251 9252 return IsValidCast(FirstType, SecondType) || 9253 IsValidCast(SecondType, FirstType); 9254 } 9255 9256 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9257 QualType SecondType) { 9258 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 9259 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 9260 "Expected SVE builtin type and vector type!"); 9261 9262 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9263 const auto *BT = FirstType->getAs<BuiltinType>(); 9264 if (!BT) 9265 return false; 9266 9267 const auto *VecTy = SecondType->getAs<VectorType>(); 9268 if (VecTy && 9269 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || 9270 VecTy->getVectorKind() == VectorType::GenericVector)) { 9271 const LangOptions::LaxVectorConversionKind LVCKind = 9272 getLangOpts().getLaxVectorConversions(); 9273 9274 // Can not convert between sve predicates and sve vectors because of 9275 // different size. 9276 if (BT->getKind() == BuiltinType::SveBool && 9277 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) 9278 return false; 9279 9280 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9281 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9282 // converts to VLAT and VLAT implicitly converts to GNUT." 9283 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9284 // predicates. 9285 if (VecTy->getVectorKind() == VectorType::GenericVector && 9286 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9287 return false; 9288 9289 // If -flax-vector-conversions=all is specified, the types are 9290 // certainly compatible. 9291 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9292 return true; 9293 9294 // If -flax-vector-conversions=integer is specified, the types are 9295 // compatible if the elements are integer types. 9296 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9297 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9298 FirstType->getSveEltType(*this)->isIntegerType(); 9299 } 9300 9301 return false; 9302 }; 9303 9304 return IsLaxCompatible(FirstType, SecondType) || 9305 IsLaxCompatible(SecondType, FirstType); 9306 } 9307 9308 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9309 while (true) { 9310 // __strong id 9311 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9312 if (Attr->getAttrKind() == attr::ObjCOwnership) 9313 return true; 9314 9315 Ty = Attr->getModifiedType(); 9316 9317 // X *__strong (...) 9318 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9319 Ty = Paren->getInnerType(); 9320 9321 // We do not want to look through typedefs, typeof(expr), 9322 // typeof(type), or any other way that the type is somehow 9323 // abstracted. 
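      // For example (illustrative), an ownership qualifier hidden behind a
      // typedef ("typedef __strong id StrongId;") is not considered direct,
      // so the walk stops here and reports false.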
9324 } else { 9325 return false; 9326 } 9327 } 9328 } 9329 9330 //===----------------------------------------------------------------------===// 9331 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9332 //===----------------------------------------------------------------------===// 9333 9334 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9335 /// inheritance hierarchy of 'rProto'. 9336 bool 9337 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9338 ObjCProtocolDecl *rProto) const { 9339 if (declaresSameEntity(lProto, rProto)) 9340 return true; 9341 for (auto *PI : rProto->protocols()) 9342 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9343 return true; 9344 return false; 9345 } 9346 9347 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9348 /// Class<pr1, ...>. 9349 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9350 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9351 for (auto *lhsProto : lhs->quals()) { 9352 bool match = false; 9353 for (auto *rhsProto : rhs->quals()) { 9354 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9355 match = true; 9356 break; 9357 } 9358 } 9359 if (!match) 9360 return false; 9361 } 9362 return true; 9363 } 9364 9365 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9366 /// ObjCQualifiedIDType. 9367 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9368 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9369 bool compare) { 9370 // Allow id<P..> and an 'id' in all cases. 9371 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9372 return true; 9373 9374 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9375 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9376 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9377 return false; 9378 9379 if (lhs->isObjCQualifiedIdType()) { 9380 if (rhs->qual_empty()) { 9381 // If the RHS is a unqualified interface pointer "NSString*", 9382 // make sure we check the class hierarchy. 9383 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9384 for (auto *I : lhs->quals()) { 9385 // when comparing an id<P> on lhs with a static type on rhs, 9386 // see if static class implements all of id's protocols, directly or 9387 // through its super class and categories. 9388 if (!rhsID->ClassImplementsProtocol(I, true)) 9389 return false; 9390 } 9391 } 9392 // If there are no qualifiers and no interface, we have an 'id'. 9393 return true; 9394 } 9395 // Both the right and left sides have qualifiers. 9396 for (auto *lhsProto : lhs->quals()) { 9397 bool match = false; 9398 9399 // when comparing an id<P> on lhs with a static type on rhs, 9400 // see if static class implements all of id's protocols, directly or 9401 // through its super class and categories. 9402 for (auto *rhsProto : rhs->quals()) { 9403 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9404 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9405 match = true; 9406 break; 9407 } 9408 } 9409 // If the RHS is a qualified interface pointer "NSString<P>*", 9410 // make sure we check the class hierarchy. 9411 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9412 for (auto *I : lhs->quals()) { 9413 // when comparing an id<P> on lhs with a static type on rhs, 9414 // see if static class implements all of id's protocols, directly or 9415 // through its super class and categories. 
9416 if (rhsID->ClassImplementsProtocol(I, true)) { 9417 match = true; 9418 break; 9419 } 9420 } 9421 } 9422 if (!match) 9423 return false; 9424 } 9425 9426 return true; 9427 } 9428 9429 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9430 9431 if (lhs->getInterfaceType()) { 9432 // If both the right and left sides have qualifiers. 9433 for (auto *lhsProto : lhs->quals()) { 9434 bool match = false; 9435 9436 // when comparing an id<P> on rhs with a static type on lhs, 9437 // see if static class implements all of id's protocols, directly or 9438 // through its super class and categories. 9439 // First, lhs protocols in the qualifier list must be found, direct 9440 // or indirect in rhs's qualifier list or it is a mismatch. 9441 for (auto *rhsProto : rhs->quals()) { 9442 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9443 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9444 match = true; 9445 break; 9446 } 9447 } 9448 if (!match) 9449 return false; 9450 } 9451 9452 // Static class's protocols, or its super class or category protocols 9453 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9454 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9455 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9456 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9457 // This is rather dubious but matches gcc's behavior. If lhs has 9458 // no type qualifier and its class has no static protocol(s) 9459 // assume that it is mismatch. 9460 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9461 return false; 9462 for (auto *lhsProto : LHSInheritedProtocols) { 9463 bool match = false; 9464 for (auto *rhsProto : rhs->quals()) { 9465 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9466 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9467 match = true; 9468 break; 9469 } 9470 } 9471 if (!match) 9472 return false; 9473 } 9474 } 9475 return true; 9476 } 9477 return false; 9478 } 9479 9480 /// canAssignObjCInterfaces - Return true if the two interface types are 9481 /// compatible for assignment from RHS to LHS. This handles validation of any 9482 /// protocol qualifiers on the LHS or RHS. 9483 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9484 const ObjCObjectPointerType *RHSOPT) { 9485 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9486 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9487 9488 // If either type represents the built-in 'id' type, return true. 9489 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9490 return true; 9491 9492 // Function object that propagates a successful result or handles 9493 // __kindof types. 9494 auto finish = [&](bool succeeded) -> bool { 9495 if (succeeded) 9496 return true; 9497 9498 if (!RHS->isKindOfType()) 9499 return false; 9500 9501 // Strip off __kindof and protocol qualifiers, then check whether 9502 // we can assign the other way. 9503 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9504 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9505 }; 9506 9507 // Casts from or to id<P> are allowed when the other side has compatible 9508 // protocols. 9509 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9510 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9511 } 9512 9513 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 
9514 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9515 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9516 } 9517 9518 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9519 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9520 return true; 9521 } 9522 9523 // If we have 2 user-defined types, fall into that path. 9524 if (LHS->getInterface() && RHS->getInterface()) { 9525 return finish(canAssignObjCInterfaces(LHS, RHS)); 9526 } 9527 9528 return false; 9529 } 9530 9531 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9532 /// for providing type-safety for objective-c pointers used to pass/return 9533 /// arguments in block literals. When passed as arguments, passing 'A*' where 9534 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9535 /// not OK. For the return type, the opposite is not OK. 9536 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9537 const ObjCObjectPointerType *LHSOPT, 9538 const ObjCObjectPointerType *RHSOPT, 9539 bool BlockReturnType) { 9540 9541 // Function object that propagates a successful result or handles 9542 // __kindof types. 9543 auto finish = [&](bool succeeded) -> bool { 9544 if (succeeded) 9545 return true; 9546 9547 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9548 if (!Expected->isKindOfType()) 9549 return false; 9550 9551 // Strip off __kindof and protocol qualifiers, then check whether 9552 // we can assign the other way. 9553 return canAssignObjCInterfacesInBlockPointer( 9554 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9555 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9556 BlockReturnType); 9557 }; 9558 9559 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9560 return true; 9561 9562 if (LHSOPT->isObjCBuiltinType()) { 9563 return finish(RHSOPT->isObjCBuiltinType() || 9564 RHSOPT->isObjCQualifiedIdType()); 9565 } 9566 9567 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9568 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9569 // Use for block parameters previous type checking for compatibility. 9570 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9571 // Or corrected type checking as in non-compat mode. 9572 (!BlockReturnType && 9573 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9574 else 9575 return finish(ObjCQualifiedIdTypesAreCompatible( 9576 (BlockReturnType ? LHSOPT : RHSOPT), 9577 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9578 } 9579 9580 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9581 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9582 if (LHS && RHS) { // We have 2 user-defined types. 9583 if (LHS != RHS) { 9584 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9585 return finish(BlockReturnType); 9586 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9587 return finish(!BlockReturnType); 9588 } 9589 else 9590 return true; 9591 } 9592 return false; 9593 } 9594 9595 /// Comparison routine for Objective-C protocols to be used with 9596 /// llvm::array_pod_sort. 9597 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9598 ObjCProtocolDecl * const *rhs) { 9599 return (*lhs)->getName().compare((*rhs)->getName()); 9600 } 9601 9602 /// getIntersectionOfProtocols - This routine finds the intersection of set 9603 /// of protocols inherited from two distinct objective-c pointer objects with 9604 /// the given common base. 
9605 /// It is used to build composite qualifier list of the composite type of 9606 /// the conditional expression involving two objective-c pointer objects. 9607 static 9608 void getIntersectionOfProtocols(ASTContext &Context, 9609 const ObjCInterfaceDecl *CommonBase, 9610 const ObjCObjectPointerType *LHSOPT, 9611 const ObjCObjectPointerType *RHSOPT, 9612 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9613 9614 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9615 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9616 assert(LHS->getInterface() && "LHS must have an interface base"); 9617 assert(RHS->getInterface() && "RHS must have an interface base"); 9618 9619 // Add all of the protocols for the LHS. 9620 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9621 9622 // Start with the protocol qualifiers. 9623 for (auto proto : LHS->quals()) { 9624 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9625 } 9626 9627 // Also add the protocols associated with the LHS interface. 9628 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9629 9630 // Add all of the protocols for the RHS. 9631 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9632 9633 // Start with the protocol qualifiers. 9634 for (auto proto : RHS->quals()) { 9635 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9636 } 9637 9638 // Also add the protocols associated with the RHS interface. 9639 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9640 9641 // Compute the intersection of the collected protocol sets. 9642 for (auto proto : LHSProtocolSet) { 9643 if (RHSProtocolSet.count(proto)) 9644 IntersectionSet.push_back(proto); 9645 } 9646 9647 // Compute the set of protocols that is implied by either the common type or 9648 // the protocols within the intersection. 9649 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9650 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9651 9652 // Remove any implied protocols from the list of inherited protocols. 9653 if (!ImpliedProtocols.empty()) { 9654 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9655 return ImpliedProtocols.contains(proto); 9656 }); 9657 } 9658 9659 // Sort the remaining protocols by name. 9660 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9661 compareObjCProtocolsByName); 9662 } 9663 9664 /// Determine whether the first type is a subtype of the second. 9665 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9666 QualType rhs) { 9667 // Common case: two object pointers. 9668 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9669 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9670 if (lhsOPT && rhsOPT) 9671 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9672 9673 // Two block pointers. 9674 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9675 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9676 if (lhsBlock && rhsBlock) 9677 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9678 9679 // If either is an unqualified 'id' and the other is a block, it's 9680 // acceptable. 9681 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9682 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9683 return true; 9684 9685 return false; 9686 } 9687 9688 // Check that the given Objective-C type argument lists are equivalent. 
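// For illustration only (hypothetical Objective-C, not from this file): for a
// generic interface such as
//   @interface MyArray<__covariant T> : NSObject @end
// a covariant parameter lets rhsArgs[i] be a subtype of lhsArgs[i] (e.g.
// MyArray<NSMutableString *> where MyArray<NSString *> is on the lhs side),
// a contravariant parameter allows the reverse, and an invariant parameter
// requires the same argument type, modulo __kindof stripping when stripKindOf
// is set.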
9689 static bool sameObjCTypeArgs(ASTContext &ctx, 9690 const ObjCInterfaceDecl *iface, 9691 ArrayRef<QualType> lhsArgs, 9692 ArrayRef<QualType> rhsArgs, 9693 bool stripKindOf) { 9694 if (lhsArgs.size() != rhsArgs.size()) 9695 return false; 9696 9697 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9698 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9699 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9700 continue; 9701 9702 switch (typeParams->begin()[i]->getVariance()) { 9703 case ObjCTypeParamVariance::Invariant: 9704 if (!stripKindOf || 9705 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9706 rhsArgs[i].stripObjCKindOfType(ctx))) { 9707 return false; 9708 } 9709 break; 9710 9711 case ObjCTypeParamVariance::Covariant: 9712 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9713 return false; 9714 break; 9715 9716 case ObjCTypeParamVariance::Contravariant: 9717 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9718 return false; 9719 break; 9720 } 9721 } 9722 9723 return true; 9724 } 9725 9726 QualType ASTContext::areCommonBaseCompatible( 9727 const ObjCObjectPointerType *Lptr, 9728 const ObjCObjectPointerType *Rptr) { 9729 const ObjCObjectType *LHS = Lptr->getObjectType(); 9730 const ObjCObjectType *RHS = Rptr->getObjectType(); 9731 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 9732 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 9733 9734 if (!LDecl || !RDecl) 9735 return {}; 9736 9737 // When either LHS or RHS is a kindof type, we should return a kindof type. 9738 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 9739 // kindof(A). 9740 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 9741 9742 // Follow the left-hand side up the class hierarchy until we either hit a 9743 // root or find the RHS. Record the ancestors in case we don't find it. 9744 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 9745 LHSAncestors; 9746 while (true) { 9747 // Record this ancestor. We'll need this if the common type isn't in the 9748 // path from the LHS to the root. 9749 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 9750 9751 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 9752 // Get the type arguments. 9753 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 9754 bool anyChanges = false; 9755 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9756 // Both have type arguments, compare them. 9757 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9758 LHS->getTypeArgs(), RHS->getTypeArgs(), 9759 /*stripKindOf=*/true)) 9760 return {}; 9761 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9762 // If only one has type arguments, the result will not have type 9763 // arguments. 9764 LHSTypeArgs = {}; 9765 anyChanges = true; 9766 } 9767 9768 // Compute the intersection of protocols. 9769 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9770 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 9771 Protocols); 9772 if (!Protocols.empty()) 9773 anyChanges = true; 9774 9775 // If anything in the LHS will have changed, build a new result type. 9776 // If we need to return a kindof type but LHS is not a kindof type, we 9777 // build a new result type. 
9778 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 9779 QualType Result = getObjCInterfaceType(LHS->getInterface()); 9780 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 9781 anyKindOf || LHS->isKindOfType()); 9782 return getObjCObjectPointerType(Result); 9783 } 9784 9785 return getObjCObjectPointerType(QualType(LHS, 0)); 9786 } 9787 9788 // Find the superclass. 9789 QualType LHSSuperType = LHS->getSuperClassType(); 9790 if (LHSSuperType.isNull()) 9791 break; 9792 9793 LHS = LHSSuperType->castAs<ObjCObjectType>(); 9794 } 9795 9796 // We didn't find anything by following the LHS to its root; now check 9797 // the RHS against the cached set of ancestors. 9798 while (true) { 9799 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 9800 if (KnownLHS != LHSAncestors.end()) { 9801 LHS = KnownLHS->second; 9802 9803 // Get the type arguments. 9804 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 9805 bool anyChanges = false; 9806 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9807 // Both have type arguments, compare them. 9808 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9809 LHS->getTypeArgs(), RHS->getTypeArgs(), 9810 /*stripKindOf=*/true)) 9811 return {}; 9812 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9813 // If only one has type arguments, the result will not have type 9814 // arguments. 9815 RHSTypeArgs = {}; 9816 anyChanges = true; 9817 } 9818 9819 // Compute the intersection of protocols. 9820 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9821 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 9822 Protocols); 9823 if (!Protocols.empty()) 9824 anyChanges = true; 9825 9826 // If we need to return a kindof type but RHS is not a kindof type, we 9827 // build a new result type. 9828 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 9829 QualType Result = getObjCInterfaceType(RHS->getInterface()); 9830 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 9831 anyKindOf || RHS->isKindOfType()); 9832 return getObjCObjectPointerType(Result); 9833 } 9834 9835 return getObjCObjectPointerType(QualType(RHS, 0)); 9836 } 9837 9838 // Find the superclass of the RHS. 9839 QualType RHSSuperType = RHS->getSuperClassType(); 9840 if (RHSSuperType.isNull()) 9841 break; 9842 9843 RHS = RHSSuperType->castAs<ObjCObjectType>(); 9844 } 9845 9846 return {}; 9847 } 9848 9849 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 9850 const ObjCObjectType *RHS) { 9851 assert(LHS->getInterface() && "LHS is not an interface type"); 9852 assert(RHS->getInterface() && "RHS is not an interface type"); 9853 9854 // Verify that the base decls are compatible: the RHS must be a subclass of 9855 // the LHS. 9856 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 9857 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 9858 if (!IsSuperClass) 9859 return false; 9860 9861 // If the LHS has protocol qualifiers, determine whether all of them are 9862 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 9863 // LHS). 9864 if (LHS->getNumProtocols() > 0) { 9865 // OK if conversion of LHS to SuperClass results in narrowing of types 9866 // ; i.e., SuperClass may implement at least one of the protocols 9867 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 9868 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 
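    // The check below collects every protocol inherited by the RHS interface,
    // plus any protocols spelled explicitly in the RHS qualifier list, and
    // then requires each LHS protocol to be found among them by name via
    // lookupProtocolNamed, so a protocol that inherits the required protocol
    // also satisfies it.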
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
    // If there are no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(*this, LHS->getInterface(),
                          LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}

bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
  // Get the "pointed to" types.
  const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
  const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();

  if (!LHSOPT || !RHSOPT)
    return false;

  return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
         canAssignObjCInterfaces(RHSOPT, LHSOPT);
}

bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
  return canAssignObjCInterfaces(
      getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
      getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
}

/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
/// both shall have the identically qualified version of a compatible type.
/// C99 6.2.7p1: Two types have compatible types if their types are the
/// same. See 6.7.[2,3,5] for additional rules.
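///
/// For illustration only (a sketch of the C rules this models, not code from
/// this file):
/// \code
///   int *p;      // compatible with 'int *', not with 'const int *'
///   int a[];     // compatible with 'int[10]'
///   int f();     // compatible with 'int f(int)' under the prototype rules
/// \endcode
/// In C++ mode this reduces to hasSameType(); in C it succeeds exactly when
/// mergeTypes() yields a non-null merged type.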
9933 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 9934 bool CompareUnqualified) { 9935 if (getLangOpts().CPlusPlus) 9936 return hasSameType(LHS, RHS); 9937 9938 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 9939 } 9940 9941 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 9942 return typesAreCompatible(LHS, RHS); 9943 } 9944 9945 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 9946 return !mergeTypes(LHS, RHS, true).isNull(); 9947 } 9948 9949 /// mergeTransparentUnionType - if T is a transparent union type and a member 9950 /// of T is compatible with SubType, return the merged type, else return 9951 /// QualType() 9952 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 9953 bool OfBlockPointer, 9954 bool Unqualified) { 9955 if (const RecordType *UT = T->getAsUnionType()) { 9956 RecordDecl *UD = UT->getDecl(); 9957 if (UD->hasAttr<TransparentUnionAttr>()) { 9958 for (const auto *I : UD->fields()) { 9959 QualType ET = I->getType().getUnqualifiedType(); 9960 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 9961 if (!MT.isNull()) 9962 return MT; 9963 } 9964 } 9965 } 9966 9967 return {}; 9968 } 9969 9970 /// mergeFunctionParameterTypes - merge two types which appear as function 9971 /// parameter types 9972 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 9973 bool OfBlockPointer, 9974 bool Unqualified) { 9975 // GNU extension: two types are compatible if they appear as a function 9976 // argument, one of the types is a transparent union type and the other 9977 // type is compatible with a union member 9978 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 9979 Unqualified); 9980 if (!lmerge.isNull()) 9981 return lmerge; 9982 9983 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 9984 Unqualified); 9985 if (!rmerge.isNull()) 9986 return rmerge; 9987 9988 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 9989 } 9990 9991 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 9992 bool OfBlockPointer, bool Unqualified, 9993 bool AllowCXX) { 9994 const auto *lbase = lhs->castAs<FunctionType>(); 9995 const auto *rbase = rhs->castAs<FunctionType>(); 9996 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 9997 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 9998 bool allLTypes = true; 9999 bool allRTypes = true; 10000 10001 // Check return type 10002 QualType retType; 10003 if (OfBlockPointer) { 10004 QualType RHS = rbase->getReturnType(); 10005 QualType LHS = lbase->getReturnType(); 10006 bool UnqualifiedResult = Unqualified; 10007 if (!UnqualifiedResult) 10008 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10009 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10010 } 10011 else 10012 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10013 Unqualified); 10014 if (retType.isNull()) 10015 return {}; 10016 10017 if (Unqualified) 10018 retType = retType.getUnqualifiedType(); 10019 10020 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10021 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10022 if (Unqualified) { 10023 LRetType = LRetType.getUnqualifiedType(); 10024 RRetType = RRetType.getUnqualifiedType(); 10025 } 10026 10027 if (getCanonicalType(retType) != LRetType) 10028 allLTypes = false; 10029 if (getCanonicalType(retType) != RRetType) 10030 allRTypes = false; 10031 
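  // allLTypes/allRTypes track whether the merged result is still canonically
  // identical to the original lhs/rhs; if one of them survives to the end,
  // the corresponding original (sugared) type is returned instead of building
  // a new function type.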
10032 // FIXME: double check this 10033 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10034 // rbase->getRegParmAttr() != 0 && 10035 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10036 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10037 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10038 10039 // Compatible functions must have compatible calling conventions 10040 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10041 return {}; 10042 10043 // Regparm is part of the calling convention. 10044 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10045 return {}; 10046 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10047 return {}; 10048 10049 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10050 return {}; 10051 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10052 return {}; 10053 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10054 return {}; 10055 10056 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 10057 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10058 10059 if (lbaseInfo.getNoReturn() != NoReturn) 10060 allLTypes = false; 10061 if (rbaseInfo.getNoReturn() != NoReturn) 10062 allRTypes = false; 10063 10064 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10065 10066 if (lproto && rproto) { // two C99 style function prototypes 10067 assert((AllowCXX || 10068 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10069 "C++ shouldn't be here"); 10070 // Compatible functions must have the same number of parameters 10071 if (lproto->getNumParams() != rproto->getNumParams()) 10072 return {}; 10073 10074 // Variadic and non-variadic functions aren't compatible 10075 if (lproto->isVariadic() != rproto->isVariadic()) 10076 return {}; 10077 10078 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10079 return {}; 10080 10081 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10082 bool canUseLeft, canUseRight; 10083 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10084 newParamInfos)) 10085 return {}; 10086 10087 if (!canUseLeft) 10088 allLTypes = false; 10089 if (!canUseRight) 10090 allRTypes = false; 10091 10092 // Check parameter type compatibility 10093 SmallVector<QualType, 10> types; 10094 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10095 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10096 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10097 QualType paramType = mergeFunctionParameterTypes( 10098 lParamType, rParamType, OfBlockPointer, Unqualified); 10099 if (paramType.isNull()) 10100 return {}; 10101 10102 if (Unqualified) 10103 paramType = paramType.getUnqualifiedType(); 10104 10105 types.push_back(paramType); 10106 if (Unqualified) { 10107 lParamType = lParamType.getUnqualifiedType(); 10108 rParamType = rParamType.getUnqualifiedType(); 10109 } 10110 10111 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10112 allLTypes = false; 10113 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10114 allRTypes = false; 10115 } 10116 10117 if (allLTypes) return lhs; 10118 if (allRTypes) return rhs; 10119 10120 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10121 EPI.ExtInfo = einfo; 10122 EPI.ExtParameterInfos = 10123 newParamInfos.empty() ? 
nullptr : newParamInfos.data(); 10124 return getFunctionType(retType, types, EPI); 10125 } 10126 10127 if (lproto) allRTypes = false; 10128 if (rproto) allLTypes = false; 10129 10130 const FunctionProtoType *proto = lproto ? lproto : rproto; 10131 if (proto) { 10132 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10133 if (proto->isVariadic()) 10134 return {}; 10135 // Check that the types are compatible with the types that 10136 // would result from default argument promotions (C99 6.7.5.3p15). 10137 // The only types actually affected are promotable integer 10138 // types and floats, which would be passed as a different 10139 // type depending on whether the prototype is visible. 10140 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10141 QualType paramTy = proto->getParamType(i); 10142 10143 // Look at the converted type of enum types, since that is the type used 10144 // to pass enum values. 10145 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10146 paramTy = Enum->getDecl()->getIntegerType(); 10147 if (paramTy.isNull()) 10148 return {}; 10149 } 10150 10151 if (paramTy->isPromotableIntegerType() || 10152 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10153 return {}; 10154 } 10155 10156 if (allLTypes) return lhs; 10157 if (allRTypes) return rhs; 10158 10159 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10160 EPI.ExtInfo = einfo; 10161 return getFunctionType(retType, proto->getParamTypes(), EPI); 10162 } 10163 10164 if (allLTypes) return lhs; 10165 if (allRTypes) return rhs; 10166 return getFunctionNoProtoType(retType, einfo); 10167 } 10168 10169 /// Given that we have an enum type and a non-enum type, try to merge them. 10170 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10171 QualType other, bool isBlockReturnType) { 10172 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10173 // a signed integer type, or an unsigned integer type. 10174 // Compatibility is based on the underlying type, not the promotion 10175 // type. 10176 QualType underlyingType = ET->getDecl()->getIntegerType(); 10177 if (underlyingType.isNull()) 10178 return {}; 10179 if (Context.hasSameType(underlyingType, other)) 10180 return other; 10181 10182 // In block return types, we're more permissive and accept any 10183 // integral type of the same size. 10184 if (isBlockReturnType && other->isIntegerType() && 10185 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10186 return other; 10187 10188 return {}; 10189 } 10190 10191 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, 10192 bool OfBlockPointer, 10193 bool Unqualified, bool BlockReturnType) { 10194 // For C++ we will not reach this code with reference types (see below), 10195 // for OpenMP variant call overloading we might. 10196 // 10197 // C++ [expr]: If an expression initially has the type "reference to T", the 10198 // type is adjusted to "T" prior to any further analysis, the expression 10199 // designates the object or function denoted by the reference, and the 10200 // expression is an lvalue unless the reference is an rvalue reference and 10201 // the expression is a function call (possibly inside parentheses). 
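  // In practice the reference handling below only fires for OpenMP variant
  // overloads: both sides must be the same kind of reference (lvalue vs.
  // rvalue), and the merge then recurses on the referenced types; any other
  // appearance of a reference type yields the null (incompatible) result.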
10202 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10203 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10204 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10205 LHS->getTypeClass() == RHS->getTypeClass()) 10206 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10207 OfBlockPointer, Unqualified, BlockReturnType); 10208 if (LHSRefTy || RHSRefTy) 10209 return {}; 10210 10211 if (Unqualified) { 10212 LHS = LHS.getUnqualifiedType(); 10213 RHS = RHS.getUnqualifiedType(); 10214 } 10215 10216 QualType LHSCan = getCanonicalType(LHS), 10217 RHSCan = getCanonicalType(RHS); 10218 10219 // If two types are identical, they are compatible. 10220 if (LHSCan == RHSCan) 10221 return LHS; 10222 10223 // If the qualifiers are different, the types aren't compatible... mostly. 10224 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10225 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10226 if (LQuals != RQuals) { 10227 // If any of these qualifiers are different, we have a type 10228 // mismatch. 10229 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10230 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10231 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10232 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10233 return {}; 10234 10235 // Exactly one GC qualifier difference is allowed: __strong is 10236 // okay if the other type has no GC qualifier but is an Objective 10237 // C object pointer (i.e. implicitly strong by default). We fix 10238 // this by pretending that the unqualified type was actually 10239 // qualified __strong. 10240 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10241 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10242 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10243 10244 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10245 return {}; 10246 10247 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10248 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10249 } 10250 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10251 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10252 } 10253 return {}; 10254 } 10255 10256 // Okay, qualifiers are equal. 10257 10258 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10259 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10260 10261 // We want to consider the two function types to be the same for these 10262 // comparisons, just force one to the other. 10263 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10264 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10265 10266 // Same as above for arrays 10267 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10268 LHSClass = Type::ConstantArray; 10269 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10270 RHSClass = Type::ConstantArray; 10271 10272 // ObjCInterfaces are just specialized ObjCObjects. 10273 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10274 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10275 10276 // Canonicalize ExtVector -> Vector. 10277 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10278 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10279 10280 // If the canonical type classes don't match. 10281 if (LHSClass != RHSClass) { 10282 // Note that we only have special rules for turning block enum 10283 // returns into block int returns, not vice-versa. 
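    // For illustration only (hypothetical C, not from this file): given
    //   enum E { A };
    // merging 'enum E' with an integer type succeeds only when that integer
    // is E's underlying type; in a block return position any integer type of
    // the same width as the underlying type is also accepted (see
    // mergeEnumWithInteger above).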
10284 if (const auto *ETy = LHS->getAs<EnumType>()) { 10285 return mergeEnumWithInteger(*this, ETy, RHS, false); 10286 } 10287 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10288 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10289 } 10290 // allow block pointer type to match an 'id' type. 10291 if (OfBlockPointer && !BlockReturnType) { 10292 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10293 return LHS; 10294 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10295 return RHS; 10296 } 10297 10298 return {}; 10299 } 10300 10301 // The canonical type classes match. 10302 switch (LHSClass) { 10303 #define TYPE(Class, Base) 10304 #define ABSTRACT_TYPE(Class, Base) 10305 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10306 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10307 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10308 #include "clang/AST/TypeNodes.inc" 10309 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10310 10311 case Type::Auto: 10312 case Type::DeducedTemplateSpecialization: 10313 case Type::LValueReference: 10314 case Type::RValueReference: 10315 case Type::MemberPointer: 10316 llvm_unreachable("C++ should never be in mergeTypes"); 10317 10318 case Type::ObjCInterface: 10319 case Type::IncompleteArray: 10320 case Type::VariableArray: 10321 case Type::FunctionProto: 10322 case Type::ExtVector: 10323 llvm_unreachable("Types are eliminated above"); 10324 10325 case Type::Pointer: 10326 { 10327 // Merge two pointer types, while trying to preserve typedef info 10328 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10329 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10330 if (Unqualified) { 10331 LHSPointee = LHSPointee.getUnqualifiedType(); 10332 RHSPointee = RHSPointee.getUnqualifiedType(); 10333 } 10334 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10335 Unqualified); 10336 if (ResultType.isNull()) 10337 return {}; 10338 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10339 return LHS; 10340 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10341 return RHS; 10342 return getPointerType(ResultType); 10343 } 10344 case Type::BlockPointer: 10345 { 10346 // Merge two block pointer types, while trying to preserve typedef info 10347 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10348 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10349 if (Unqualified) { 10350 LHSPointee = LHSPointee.getUnqualifiedType(); 10351 RHSPointee = RHSPointee.getUnqualifiedType(); 10352 } 10353 if (getLangOpts().OpenCL) { 10354 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10355 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10356 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10357 // 6.12.5) thus the following check is asymmetric. 
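      // For illustration only: a __generic pointee on the LHS block type may
      // absorb a __private, __local, or __global pointee from the RHS, but a
      // named address space on the LHS never accepts a __generic RHS pointee.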
10358 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10359 return {}; 10360 LHSPteeQual.removeAddressSpace(); 10361 RHSPteeQual.removeAddressSpace(); 10362 LHSPointee = 10363 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10364 RHSPointee = 10365 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10366 } 10367 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10368 Unqualified); 10369 if (ResultType.isNull()) 10370 return {}; 10371 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10372 return LHS; 10373 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10374 return RHS; 10375 return getBlockPointerType(ResultType); 10376 } 10377 case Type::Atomic: 10378 { 10379 // Merge two pointer types, while trying to preserve typedef info 10380 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10381 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10382 if (Unqualified) { 10383 LHSValue = LHSValue.getUnqualifiedType(); 10384 RHSValue = RHSValue.getUnqualifiedType(); 10385 } 10386 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10387 Unqualified); 10388 if (ResultType.isNull()) 10389 return {}; 10390 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10391 return LHS; 10392 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10393 return RHS; 10394 return getAtomicType(ResultType); 10395 } 10396 case Type::ConstantArray: 10397 { 10398 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10399 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10400 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10401 return {}; 10402 10403 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10404 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10405 if (Unqualified) { 10406 LHSElem = LHSElem.getUnqualifiedType(); 10407 RHSElem = RHSElem.getUnqualifiedType(); 10408 } 10409 10410 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10411 if (ResultType.isNull()) 10412 return {}; 10413 10414 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10415 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10416 10417 // If either side is a variable array, and both are complete, check whether 10418 // the current dimension is definite. 
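    // For illustration only (hypothetical C, not from this file): merging
    // 'int[10]' with 'int[]' yields 'int[10]'; 'int[10]' with 'int[20]' was
    // already rejected above; and a VLA such as 'int[n]' is rejected here
    // only if its size expression folds to a constant that differs from the
    // other side's.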
10419 if (LVAT || RVAT) { 10420 auto SizeFetch = [this](const VariableArrayType* VAT, 10421 const ConstantArrayType* CAT) 10422 -> std::pair<bool,llvm::APInt> { 10423 if (VAT) { 10424 Optional<llvm::APSInt> TheInt; 10425 Expr *E = VAT->getSizeExpr(); 10426 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10427 return std::make_pair(true, *TheInt); 10428 return std::make_pair(false, llvm::APSInt()); 10429 } 10430 if (CAT) 10431 return std::make_pair(true, CAT->getSize()); 10432 return std::make_pair(false, llvm::APInt()); 10433 }; 10434 10435 bool HaveLSize, HaveRSize; 10436 llvm::APInt LSize, RSize; 10437 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10438 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10439 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10440 return {}; // Definite, but unequal, array dimension 10441 } 10442 10443 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10444 return LHS; 10445 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10446 return RHS; 10447 if (LCAT) 10448 return getConstantArrayType(ResultType, LCAT->getSize(), 10449 LCAT->getSizeExpr(), 10450 ArrayType::ArraySizeModifier(), 0); 10451 if (RCAT) 10452 return getConstantArrayType(ResultType, RCAT->getSize(), 10453 RCAT->getSizeExpr(), 10454 ArrayType::ArraySizeModifier(), 0); 10455 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10456 return LHS; 10457 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10458 return RHS; 10459 if (LVAT) { 10460 // FIXME: This isn't correct! But tricky to implement because 10461 // the array's size has to be the size of LHS, but the type 10462 // has to be different. 10463 return LHS; 10464 } 10465 if (RVAT) { 10466 // FIXME: This isn't correct! But tricky to implement because 10467 // the array's size has to be the size of RHS, but the type 10468 // has to be different. 10469 return RHS; 10470 } 10471 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10472 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10473 return getIncompleteArrayType(ResultType, 10474 ArrayType::ArraySizeModifier(), 0); 10475 } 10476 case Type::FunctionNoProto: 10477 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); 10478 case Type::Record: 10479 case Type::Enum: 10480 return {}; 10481 case Type::Builtin: 10482 // Only exactly equal builtin types are compatible, which is tested above. 10483 return {}; 10484 case Type::Complex: 10485 // Distinct complex types are incompatible. 10486 return {}; 10487 case Type::Vector: 10488 // FIXME: The merged type should be an ExtVector! 10489 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10490 RHSCan->castAs<VectorType>())) 10491 return LHS; 10492 return {}; 10493 case Type::ConstantMatrix: 10494 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10495 RHSCan->castAs<ConstantMatrixType>())) 10496 return LHS; 10497 return {}; 10498 case Type::ObjCObject: { 10499 // Check if the types are assignment compatible. 10500 // FIXME: This should be type compatibility, e.g. whether 10501 // "LHS x; RHS x;" at global scope is legal. 
10502 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10503 RHS->castAs<ObjCObjectType>())) 10504 return LHS; 10505 return {}; 10506 } 10507 case Type::ObjCObjectPointer: 10508 if (OfBlockPointer) { 10509 if (canAssignObjCInterfacesInBlockPointer( 10510 LHS->castAs<ObjCObjectPointerType>(), 10511 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10512 return LHS; 10513 return {}; 10514 } 10515 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10516 RHS->castAs<ObjCObjectPointerType>())) 10517 return LHS; 10518 return {}; 10519 case Type::Pipe: 10520 assert(LHS != RHS && 10521 "Equivalent pipe types should have already been handled!"); 10522 return {}; 10523 case Type::BitInt: { 10524 // Merge two bit-precise int types, while trying to preserve typedef info. 10525 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10526 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10527 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10528 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10529 10530 // Like unsigned/int, shouldn't have a type if they don't match. 10531 if (LHSUnsigned != RHSUnsigned) 10532 return {}; 10533 10534 if (LHSBits != RHSBits) 10535 return {}; 10536 return LHS; 10537 } 10538 } 10539 10540 llvm_unreachable("Invalid Type::Class!"); 10541 } 10542 10543 bool ASTContext::mergeExtParameterInfo( 10544 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10545 bool &CanUseFirst, bool &CanUseSecond, 10546 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10547 assert(NewParamInfos.empty() && "param info list not empty"); 10548 CanUseFirst = CanUseSecond = true; 10549 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10550 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10551 10552 // Fast path: if the first type doesn't have ext parameter infos, 10553 // we match if and only if the second type also doesn't have them. 10554 if (!FirstHasInfo && !SecondHasInfo) 10555 return true; 10556 10557 bool NeedParamInfo = false; 10558 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10559 : SecondFnType->getExtParameterInfos().size(); 10560 10561 for (size_t I = 0; I < E; ++I) { 10562 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10563 if (FirstHasInfo) 10564 FirstParam = FirstFnType->getExtParameterInfo(I); 10565 if (SecondHasInfo) 10566 SecondParam = SecondFnType->getExtParameterInfo(I); 10567 10568 // Cannot merge unless everything except the noescape flag matches. 10569 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10570 return false; 10571 10572 bool FirstNoEscape = FirstParam.isNoEscape(); 10573 bool SecondNoEscape = SecondParam.isNoEscape(); 10574 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10575 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10576 if (NewParamInfos.back().getOpaqueValue()) 10577 NeedParamInfo = true; 10578 if (FirstNoEscape != IsNoEscape) 10579 CanUseFirst = false; 10580 if (SecondNoEscape != IsNoEscape) 10581 CanUseSecond = false; 10582 } 10583 10584 if (!NeedParamInfo) 10585 NewParamInfos.clear(); 10586 10587 return true; 10588 } 10589 10590 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10591 ObjCLayouts[CD] = nullptr; 10592 } 10593 10594 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10595 /// 'RHS' attributes and returns the merged version; including for function 10596 /// return types. 
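///
/// For illustration only (hypothetical Objective-C under GC, not code from
/// this file):
/// \code
///   id foo();
///   __strong id foo();   // redeclaration differing only in the GC qualifier
/// \endcode
/// The two declarations merge because __strong may pair with an otherwise
/// unqualified Objective-C object pointer, whereas a __weak mismatch never
/// merges.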
10597 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10598 QualType LHSCan = getCanonicalType(LHS), 10599 RHSCan = getCanonicalType(RHS); 10600 // If two types are identical, they are compatible. 10601 if (LHSCan == RHSCan) 10602 return LHS; 10603 if (RHSCan->isFunctionType()) { 10604 if (!LHSCan->isFunctionType()) 10605 return {}; 10606 QualType OldReturnType = 10607 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10608 QualType NewReturnType = 10609 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10610 QualType ResReturnType = 10611 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10612 if (ResReturnType.isNull()) 10613 return {}; 10614 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10615 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10616 // In either case, use OldReturnType to build the new function type. 10617 const auto *F = LHS->castAs<FunctionType>(); 10618 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10619 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10620 EPI.ExtInfo = getFunctionExtInfo(LHS); 10621 QualType ResultType = 10622 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10623 return ResultType; 10624 } 10625 } 10626 return {}; 10627 } 10628 10629 // If the qualifiers are different, the types can still be merged. 10630 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10631 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10632 if (LQuals != RQuals) { 10633 // If any of these qualifiers are different, we have a type mismatch. 10634 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10635 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10636 return {}; 10637 10638 // Exactly one GC qualifier difference is allowed: __strong is 10639 // okay if the other type has no GC qualifier but is an Objective 10640 // C object pointer (i.e. implicitly strong by default). We fix 10641 // this by pretending that the unqualified type was actually 10642 // qualified __strong. 
10643 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10644 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10645 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10646 10647 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10648 return {}; 10649 10650 if (GC_L == Qualifiers::Strong) 10651 return LHS; 10652 if (GC_R == Qualifiers::Strong) 10653 return RHS; 10654 return {}; 10655 } 10656 10657 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10658 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10659 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10660 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10661 if (ResQT == LHSBaseQT) 10662 return LHS; 10663 if (ResQT == RHSBaseQT) 10664 return RHS; 10665 } 10666 return {}; 10667 } 10668 10669 //===----------------------------------------------------------------------===// 10670 // Integer Predicates 10671 //===----------------------------------------------------------------------===// 10672 10673 unsigned ASTContext::getIntWidth(QualType T) const { 10674 if (const auto *ET = T->getAs<EnumType>()) 10675 T = ET->getDecl()->getIntegerType(); 10676 if (T->isBooleanType()) 10677 return 1; 10678 if (const auto *EIT = T->getAs<BitIntType>()) 10679 return EIT->getNumBits(); 10680 // For builtin types, just use the standard type sizing method 10681 return (unsigned)getTypeSize(T); 10682 } 10683 10684 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 10685 assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 10686 "Unexpected type"); 10687 10688 // Turn <4 x signed int> -> <4 x unsigned int> 10689 if (const auto *VTy = T->getAs<VectorType>()) 10690 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 10691 VTy->getNumElements(), VTy->getVectorKind()); 10692 10693 // For _BitInt, return an unsigned _BitInt with same width. 10694 if (const auto *EITy = T->getAs<BitIntType>()) 10695 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 10696 10697 // For enums, get the underlying integer type of the enum, and let the general 10698 // integer type signchanging code handle it. 10699 if (const auto *ETy = T->getAs<EnumType>()) 10700 T = ETy->getDecl()->getIntegerType(); 10701 10702 switch (T->castAs<BuiltinType>()->getKind()) { 10703 case BuiltinType::Char_S: 10704 case BuiltinType::SChar: 10705 return UnsignedCharTy; 10706 case BuiltinType::Short: 10707 return UnsignedShortTy; 10708 case BuiltinType::Int: 10709 return UnsignedIntTy; 10710 case BuiltinType::Long: 10711 return UnsignedLongTy; 10712 case BuiltinType::LongLong: 10713 return UnsignedLongLongTy; 10714 case BuiltinType::Int128: 10715 return UnsignedInt128Ty; 10716 // wchar_t is special. It is either signed or not, but when it's signed, 10717 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 10718 // version of it's underlying type instead. 
10719 case BuiltinType::WChar_S: 10720 return getUnsignedWCharType(); 10721 10722 case BuiltinType::ShortAccum: 10723 return UnsignedShortAccumTy; 10724 case BuiltinType::Accum: 10725 return UnsignedAccumTy; 10726 case BuiltinType::LongAccum: 10727 return UnsignedLongAccumTy; 10728 case BuiltinType::SatShortAccum: 10729 return SatUnsignedShortAccumTy; 10730 case BuiltinType::SatAccum: 10731 return SatUnsignedAccumTy; 10732 case BuiltinType::SatLongAccum: 10733 return SatUnsignedLongAccumTy; 10734 case BuiltinType::ShortFract: 10735 return UnsignedShortFractTy; 10736 case BuiltinType::Fract: 10737 return UnsignedFractTy; 10738 case BuiltinType::LongFract: 10739 return UnsignedLongFractTy; 10740 case BuiltinType::SatShortFract: 10741 return SatUnsignedShortFractTy; 10742 case BuiltinType::SatFract: 10743 return SatUnsignedFractTy; 10744 case BuiltinType::SatLongFract: 10745 return SatUnsignedLongFractTy; 10746 default: 10747 llvm_unreachable("Unexpected signed integer or fixed point type"); 10748 } 10749 } 10750 10751 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 10752 assert((T->hasUnsignedIntegerRepresentation() || 10753 T->isUnsignedFixedPointType()) && 10754 "Unexpected type"); 10755 10756 // Turn <4 x unsigned int> -> <4 x signed int> 10757 if (const auto *VTy = T->getAs<VectorType>()) 10758 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 10759 VTy->getNumElements(), VTy->getVectorKind()); 10760 10761 // For _BitInt, return a signed _BitInt with same width. 10762 if (const auto *EITy = T->getAs<BitIntType>()) 10763 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 10764 10765 // For enums, get the underlying integer type of the enum, and let the general 10766 // integer type signchanging code handle it. 10767 if (const auto *ETy = T->getAs<EnumType>()) 10768 T = ETy->getDecl()->getIntegerType(); 10769 10770 switch (T->castAs<BuiltinType>()->getKind()) { 10771 case BuiltinType::Char_U: 10772 case BuiltinType::UChar: 10773 return SignedCharTy; 10774 case BuiltinType::UShort: 10775 return ShortTy; 10776 case BuiltinType::UInt: 10777 return IntTy; 10778 case BuiltinType::ULong: 10779 return LongTy; 10780 case BuiltinType::ULongLong: 10781 return LongLongTy; 10782 case BuiltinType::UInt128: 10783 return Int128Ty; 10784 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 10785 // there's no matching "signed wchar_t". Therefore we return the signed 10786 // version of it's underlying type instead. 
10787 case BuiltinType::WChar_U: 10788 return getSignedWCharType(); 10789 10790 case BuiltinType::UShortAccum: 10791 return ShortAccumTy; 10792 case BuiltinType::UAccum: 10793 return AccumTy; 10794 case BuiltinType::ULongAccum: 10795 return LongAccumTy; 10796 case BuiltinType::SatUShortAccum: 10797 return SatShortAccumTy; 10798 case BuiltinType::SatUAccum: 10799 return SatAccumTy; 10800 case BuiltinType::SatULongAccum: 10801 return SatLongAccumTy; 10802 case BuiltinType::UShortFract: 10803 return ShortFractTy; 10804 case BuiltinType::UFract: 10805 return FractTy; 10806 case BuiltinType::ULongFract: 10807 return LongFractTy; 10808 case BuiltinType::SatUShortFract: 10809 return SatShortFractTy; 10810 case BuiltinType::SatUFract: 10811 return SatFractTy; 10812 case BuiltinType::SatULongFract: 10813 return SatLongFractTy; 10814 default: 10815 llvm_unreachable("Unexpected unsigned integer or fixed point type"); 10816 } 10817 } 10818 10819 ASTMutationListener::~ASTMutationListener() = default; 10820 10821 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 10822 QualType ReturnType) {} 10823 10824 //===----------------------------------------------------------------------===// 10825 // Builtin Type Computation 10826 //===----------------------------------------------------------------------===// 10827 10828 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 10829 /// pointer over the consumed characters. This returns the resultant type. If 10830 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 10831 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 10832 /// a vector of "i*". 10833 /// 10834 /// RequiresICE is filled in on return to indicate whether the value is required 10835 /// to be an Integer Constant Expression. 10836 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 10837 ASTContext::GetBuiltinTypeError &Error, 10838 bool &RequiresICE, 10839 bool AllowTypeModifiers) { 10840 // Modifiers. 10841 int HowLong = 0; 10842 bool Signed = false, Unsigned = false; 10843 RequiresICE = false; 10844 10845 // Read the prefixed modifiers first. 10846 bool Done = false; 10847 #ifndef NDEBUG 10848 bool IsSpecial = false; 10849 #endif 10850 while (!Done) { 10851 switch (*Str++) { 10852 default: Done = true; --Str; break; 10853 case 'I': 10854 RequiresICE = true; 10855 break; 10856 case 'S': 10857 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 10858 assert(!Signed && "Can't use 'S' modifier multiple times!"); 10859 Signed = true; 10860 break; 10861 case 'U': 10862 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 10863 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 10864 Unsigned = true; 10865 break; 10866 case 'L': 10867 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 10868 assert(HowLong <= 2 && "Can't have LLLL modifier"); 10869 ++HowLong; 10870 break; 10871 case 'N': 10872 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 10873 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10874 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 10875 #ifndef NDEBUG 10876 IsSpecial = true; 10877 #endif 10878 if (Context.getTargetInfo().getLongWidth() == 32) 10879 ++HowLong; 10880 break; 10881 case 'W': 10882 // This modifier represents int64 type. 
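    // For illustration only: on a target whose int64_t is 'long' a descriptor
    // such as "Wi" decodes to 'long', while on a target whose int64_t is
    // 'long long' the same descriptor decodes to 'long long'.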
10883 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10884 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 10885 #ifndef NDEBUG 10886 IsSpecial = true; 10887 #endif 10888 switch (Context.getTargetInfo().getInt64Type()) { 10889 default: 10890 llvm_unreachable("Unexpected integer type"); 10891 case TargetInfo::SignedLong: 10892 HowLong = 1; 10893 break; 10894 case TargetInfo::SignedLongLong: 10895 HowLong = 2; 10896 break; 10897 } 10898 break; 10899 case 'Z': 10900 // This modifier represents int32 type. 10901 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10902 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 10903 #ifndef NDEBUG 10904 IsSpecial = true; 10905 #endif 10906 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 10907 default: 10908 llvm_unreachable("Unexpected integer type"); 10909 case TargetInfo::SignedInt: 10910 HowLong = 0; 10911 break; 10912 case TargetInfo::SignedLong: 10913 HowLong = 1; 10914 break; 10915 case TargetInfo::SignedLongLong: 10916 HowLong = 2; 10917 break; 10918 } 10919 break; 10920 case 'O': 10921 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10922 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 10923 #ifndef NDEBUG 10924 IsSpecial = true; 10925 #endif 10926 if (Context.getLangOpts().OpenCL) 10927 HowLong = 1; 10928 else 10929 HowLong = 2; 10930 break; 10931 } 10932 } 10933 10934 QualType Type; 10935 10936 // Read the base type. 10937 switch (*Str++) { 10938 default: llvm_unreachable("Unknown builtin type letter!"); 10939 case 'x': 10940 assert(HowLong == 0 && !Signed && !Unsigned && 10941 "Bad modifiers used with 'x'!"); 10942 Type = Context.Float16Ty; 10943 break; 10944 case 'y': 10945 assert(HowLong == 0 && !Signed && !Unsigned && 10946 "Bad modifiers used with 'y'!"); 10947 Type = Context.BFloat16Ty; 10948 break; 10949 case 'v': 10950 assert(HowLong == 0 && !Signed && !Unsigned && 10951 "Bad modifiers used with 'v'!"); 10952 Type = Context.VoidTy; 10953 break; 10954 case 'h': 10955 assert(HowLong == 0 && !Signed && !Unsigned && 10956 "Bad modifiers used with 'h'!"); 10957 Type = Context.HalfTy; 10958 break; 10959 case 'f': 10960 assert(HowLong == 0 && !Signed && !Unsigned && 10961 "Bad modifiers used with 'f'!"); 10962 Type = Context.FloatTy; 10963 break; 10964 case 'd': 10965 assert(HowLong < 3 && !Signed && !Unsigned && 10966 "Bad modifiers used with 'd'!"); 10967 if (HowLong == 1) 10968 Type = Context.LongDoubleTy; 10969 else if (HowLong == 2) 10970 Type = Context.Float128Ty; 10971 else 10972 Type = Context.DoubleTy; 10973 break; 10974 case 's': 10975 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 10976 if (Unsigned) 10977 Type = Context.UnsignedShortTy; 10978 else 10979 Type = Context.ShortTy; 10980 break; 10981 case 'i': 10982 if (HowLong == 3) 10983 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 10984 else if (HowLong == 2) 10985 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 10986 else if (HowLong == 1) 10987 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 10988 else 10989 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 10990 break; 10991 case 'c': 10992 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 10993 if (Signed) 10994 Type = Context.SignedCharTy; 10995 else if (Unsigned) 10996 Type = Context.UnsignedCharTy; 10997 else 10998 Type = Context.CharTy; 10999 break; 11000 case 'b': // boolean 11001 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11002 Type = Context.BoolTy; 11003 break; 11004 case 'z': // size_t. 11005 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11006 Type = Context.getSizeType(); 11007 break; 11008 case 'w': // wchar_t. 11009 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11010 Type = Context.getWideCharType(); 11011 break; 11012 case 'F': 11013 Type = Context.getCFConstantStringType(); 11014 break; 11015 case 'G': 11016 Type = Context.getObjCIdType(); 11017 break; 11018 case 'H': 11019 Type = Context.getObjCSelType(); 11020 break; 11021 case 'M': 11022 Type = Context.getObjCSuperType(); 11023 break; 11024 case 'a': 11025 Type = Context.getBuiltinVaListType(); 11026 assert(!Type.isNull() && "builtin va list type not initialized!"); 11027 break; 11028 case 'A': 11029 // This is a "reference" to a va_list; however, what exactly 11030 // this means depends on how va_list is defined. There are two 11031 // different kinds of va_list: ones passed by value, and ones 11032 // passed by reference. An example of a by-value va_list is 11033 // x86, where va_list is a char*. An example of by-ref va_list 11034 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11035 // we want this argument to be a char*&; for x86-64, we want 11036 // it to be a __va_list_tag*. 11037 Type = Context.getBuiltinVaListType(); 11038 assert(!Type.isNull() && "builtin va list type not initialized!"); 11039 if (Type->isArrayType()) 11040 Type = Context.getArrayDecayedType(Type); 11041 else 11042 Type = Context.getLValueReferenceType(Type); 11043 break; 11044 case 'q': { 11045 char *End; 11046 unsigned NumElements = strtoul(Str, &End, 10); 11047 assert(End != Str && "Missing vector size"); 11048 Str = End; 11049 11050 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11051 RequiresICE, false); 11052 assert(!RequiresICE && "Can't require vector ICE"); 11053 11054 Type = Context.getScalableVectorType(ElementType, NumElements); 11055 break; 11056 } 11057 case 'V': { 11058 char *End; 11059 unsigned NumElements = strtoul(Str, &End, 10); 11060 assert(End != Str && "Missing vector size"); 11061 Str = End; 11062 11063 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11064 RequiresICE, false); 11065 assert(!RequiresICE && "Can't require vector ICE"); 11066 11067 // TODO: No way to make AltiVec vectors in builtins yet. 
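    // A 'V' descriptor has the form "V<count><element>"; for example, "V4f"
    // (an illustrative string) decodes as a generic vector of four floats.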
11068 Type = Context.getVectorType(ElementType, NumElements, 11069 VectorType::GenericVector); 11070 break; 11071 } 11072 case 'E': { 11073 char *End; 11074 11075 unsigned NumElements = strtoul(Str, &End, 10); 11076 assert(End != Str && "Missing vector size"); 11077 11078 Str = End; 11079 11080 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11081 false); 11082 Type = Context.getExtVectorType(ElementType, NumElements); 11083 break; 11084 } 11085 case 'X': { 11086 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11087 false); 11088 assert(!RequiresICE && "Can't require complex ICE"); 11089 Type = Context.getComplexType(ElementType); 11090 break; 11091 } 11092 case 'Y': 11093 Type = Context.getPointerDiffType(); 11094 break; 11095 case 'P': 11096 Type = Context.getFILEType(); 11097 if (Type.isNull()) { 11098 Error = ASTContext::GE_Missing_stdio; 11099 return {}; 11100 } 11101 break; 11102 case 'J': 11103 if (Signed) 11104 Type = Context.getsigjmp_bufType(); 11105 else 11106 Type = Context.getjmp_bufType(); 11107 11108 if (Type.isNull()) { 11109 Error = ASTContext::GE_Missing_setjmp; 11110 return {}; 11111 } 11112 break; 11113 case 'K': 11114 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11115 Type = Context.getucontext_tType(); 11116 11117 if (Type.isNull()) { 11118 Error = ASTContext::GE_Missing_ucontext; 11119 return {}; 11120 } 11121 break; 11122 case 'p': 11123 Type = Context.getProcessIDType(); 11124 break; 11125 } 11126 11127 // If there are modifiers and if we're allowed to parse them, go for it. 11128 Done = !AllowTypeModifiers; 11129 while (!Done) { 11130 switch (char c = *Str++) { 11131 default: Done = true; --Str; break; 11132 case '*': 11133 case '&': { 11134 // Both pointers and references can have their pointee types 11135 // qualified with an address space. 11136 char *End; 11137 unsigned AddrSpace = strtoul(Str, &End, 10); 11138 if (End != Str) { 11139 // Note AddrSpace == 0 is not the same as an unspecified address space. 11140 Type = Context.getAddrSpaceQualType( 11141 Type, 11142 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11143 Str = End; 11144 } 11145 if (c == '*') 11146 Type = Context.getPointerType(Type); 11147 else 11148 Type = Context.getLValueReferenceType(Type); 11149 break; 11150 } 11151 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11152 case 'C': 11153 Type = Type.withConst(); 11154 break; 11155 case 'D': 11156 Type = Context.getVolatileType(Type); 11157 break; 11158 case 'R': 11159 Type = Type.withRestrict(); 11160 break; 11161 } 11162 } 11163 11164 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11165 "Integer constant 'I' type must be an integer"); 11166 11167 return Type; 11168 } 11169 11170 // On some targets such as PowerPC, some of the builtins are defined with custom 11171 // type descriptors for target-dependent types. These descriptors are decoded in 11172 // other functions, but it may be useful to be able to fall back to default 11173 // descriptor decoding to define builtins mixing target-dependent and target- 11174 // independent types. This function allows decoding one type descriptor with 11175 // default decoding. 
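//
// As an illustration of the default grammar (the string below is hypothetical,
// not taken from Builtins.def): a descriptor such as "v*vC*z" decodes as a
// 'void *' result followed by 'const void *' and 'size_t' components.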
11176 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11177 GetBuiltinTypeError &Error, bool &RequireICE, 11178 bool AllowTypeModifiers) const { 11179 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11180 } 11181 11182 /// GetBuiltinType - Return the type for the specified builtin. 11183 QualType ASTContext::GetBuiltinType(unsigned Id, 11184 GetBuiltinTypeError &Error, 11185 unsigned *IntegerConstantArgs) const { 11186 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11187 if (TypeStr[0] == '\0') { 11188 Error = GE_Missing_type; 11189 return {}; 11190 } 11191 11192 SmallVector<QualType, 8> ArgTypes; 11193 11194 bool RequiresICE = false; 11195 Error = GE_None; 11196 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11197 RequiresICE, true); 11198 if (Error != GE_None) 11199 return {}; 11200 11201 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11202 11203 while (TypeStr[0] && TypeStr[0] != '.') { 11204 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11205 if (Error != GE_None) 11206 return {}; 11207 11208 // If this argument is required to be an IntegerConstantExpression and the 11209 // caller cares, fill in the bitmask we return. 11210 if (RequiresICE && IntegerConstantArgs) 11211 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11212 11213 // Do array -> pointer decay. The builtin should use the decayed type. 11214 if (Ty->isArrayType()) 11215 Ty = getArrayDecayedType(Ty); 11216 11217 ArgTypes.push_back(Ty); 11218 } 11219 11220 if (Id == Builtin::BI__GetExceptionInfo) 11221 return {}; 11222 11223 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11224 "'.' should only occur at end of builtin type list!"); 11225 11226 bool Variadic = (TypeStr[0] == '.'); 11227 11228 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11229 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11230 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11231 11232 11233 // We really shouldn't be making a no-proto type here. 11234 if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus) 11235 return getFunctionNoProtoType(ResType, EI); 11236 11237 FunctionProtoType::ExtProtoInfo EPI; 11238 EPI.ExtInfo = EI; 11239 EPI.Variadic = Variadic; 11240 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11241 EPI.ExceptionSpec.Type = 11242 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11243 11244 return getFunctionType(ResType, ArgTypes, EPI); 11245 } 11246 11247 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11248 const FunctionDecl *FD) { 11249 if (!FD->isExternallyVisible()) 11250 return GVA_Internal; 11251 11252 // Non-user-provided functions get emitted as weak definitions with every 11253 // use, no matter whether they've been explicitly instantiated etc. 
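  // (The common case here is an implicit or defaulted special member, e.g. an
  // implicitly-defined copy constructor or destructor.)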
11254 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) 11255 if (!MD->isUserProvided()) 11256 return GVA_DiscardableODR; 11257 11258 GVALinkage External; 11259 switch (FD->getTemplateSpecializationKind()) { 11260 case TSK_Undeclared: 11261 case TSK_ExplicitSpecialization: 11262 External = GVA_StrongExternal; 11263 break; 11264 11265 case TSK_ExplicitInstantiationDefinition: 11266 return GVA_StrongODR; 11267 11268 // C++11 [temp.explicit]p10: 11269 // [ Note: The intent is that an inline function that is the subject of 11270 // an explicit instantiation declaration will still be implicitly 11271 // instantiated when used so that the body can be considered for 11272 // inlining, but that no out-of-line copy of the inline function would be 11273 // generated in the translation unit. -- end note ] 11274 case TSK_ExplicitInstantiationDeclaration: 11275 return GVA_AvailableExternally; 11276 11277 case TSK_ImplicitInstantiation: 11278 External = GVA_DiscardableODR; 11279 break; 11280 } 11281 11282 if (!FD->isInlined()) 11283 return External; 11284 11285 if ((!Context.getLangOpts().CPlusPlus && 11286 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11287 !FD->hasAttr<DLLExportAttr>()) || 11288 FD->hasAttr<GNUInlineAttr>()) { 11289 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11290 11291 // GNU or C99 inline semantics. Determine whether this symbol should be 11292 // externally visible. 11293 if (FD->isInlineDefinitionExternallyVisible()) 11294 return External; 11295 11296 // C99 inline semantics, where the symbol is not externally visible. 11297 return GVA_AvailableExternally; 11298 } 11299 11300 // Functions specified with extern and inline in -fms-compatibility mode 11301 // forcibly get emitted. While the body of the function cannot be later 11302 // replaced, the function definition cannot be discarded. 11303 if (FD->isMSExternInline()) 11304 return GVA_StrongODR; 11305 11306 return GVA_DiscardableODR; 11307 } 11308 11309 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11310 const Decl *D, GVALinkage L) { 11311 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11312 // dllexport/dllimport on inline functions. 11313 if (D->hasAttr<DLLImportAttr>()) { 11314 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11315 return GVA_AvailableExternally; 11316 } else if (D->hasAttr<DLLExportAttr>()) { 11317 if (L == GVA_DiscardableODR) 11318 return GVA_StrongODR; 11319 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11320 // Device-side functions with __global__ attribute must always be 11321 // visible externally so they can be launched from host. 11322 if (D->hasAttr<CUDAGlobalAttr>() && 11323 (L == GVA_DiscardableODR || L == GVA_Internal)) 11324 return GVA_StrongODR; 11325 // Single source offloading languages like CUDA/HIP need to be able to 11326 // access static device variables from host code of the same compilation 11327 // unit. This is done by externalizing the static variable with a shared 11328 // name between the host and device compilation which is the same for the 11329 // same compilation unit whereas different among different compilation 11330 // units. 11331 if (Context.shouldExternalizeStaticVar(D)) 11332 return GVA_StrongExternal; 11333 } 11334 return L; 11335 } 11336 11337 /// Adjust the GVALinkage for a declaration based on what an external AST source 11338 /// knows about whether there can be other definitions of this declaration. 
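/// In short: if the source will always provide a definition elsewhere, a weak
/// definition here can become available_externally; if it never will, other
/// translation units may be relying on this one to emit it.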
11339 static GVALinkage 11340 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11341 GVALinkage L) { 11342 ExternalASTSource *Source = Ctx.getExternalSource(); 11343 if (!Source) 11344 return L; 11345 11346 switch (Source->hasExternalDefinitions(D)) { 11347 case ExternalASTSource::EK_Never: 11348 // Other translation units rely on us to provide the definition. 11349 if (L == GVA_DiscardableODR) 11350 return GVA_StrongODR; 11351 break; 11352 11353 case ExternalASTSource::EK_Always: 11354 return GVA_AvailableExternally; 11355 11356 case ExternalASTSource::EK_ReplyHazy: 11357 break; 11358 } 11359 return L; 11360 } 11361 11362 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11363 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11364 adjustGVALinkageForAttributes(*this, FD, 11365 basicGVALinkageForFunction(*this, FD))); 11366 } 11367 11368 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11369 const VarDecl *VD) { 11370 if (!VD->isExternallyVisible()) 11371 return GVA_Internal; 11372 11373 if (VD->isStaticLocal()) { 11374 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11375 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11376 LexicalContext = LexicalContext->getLexicalParent(); 11377 11378 // ObjC Blocks can create local variables that don't have a FunctionDecl 11379 // LexicalContext. 11380 if (!LexicalContext) 11381 return GVA_DiscardableODR; 11382 11383 // Otherwise, let the static local variable inherit its linkage from the 11384 // nearest enclosing function. 11385 auto StaticLocalLinkage = 11386 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11387 11388 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11389 // be emitted in any object with references to the symbol for the object it 11390 // contains, whether inline or out-of-line." 11391 // Similar behavior is observed with MSVC. An alternative ABI could use 11392 // StrongODR/AvailableExternally to match the function, but none are 11393 // known/supported currently. 11394 if (StaticLocalLinkage == GVA_StrongODR || 11395 StaticLocalLinkage == GVA_AvailableExternally) 11396 return GVA_DiscardableODR; 11397 return StaticLocalLinkage; 11398 } 11399 11400 // MSVC treats in-class initialized static data members as definitions. 11401 // By giving them non-strong linkage, out-of-line definitions won't 11402 // cause link errors. 11403 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11404 return GVA_DiscardableODR; 11405 11406 // Most non-template variables have strong linkage; inline variables are 11407 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11408 GVALinkage StrongLinkage; 11409 switch (Context.getInlineVariableDefinitionKind(VD)) { 11410 case ASTContext::InlineVariableDefinitionKind::None: 11411 StrongLinkage = GVA_StrongExternal; 11412 break; 11413 case ASTContext::InlineVariableDefinitionKind::Weak: 11414 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11415 StrongLinkage = GVA_DiscardableODR; 11416 break; 11417 case ASTContext::InlineVariableDefinitionKind::Strong: 11418 StrongLinkage = GVA_StrongODR; 11419 break; 11420 } 11421 11422 switch (VD->getTemplateSpecializationKind()) { 11423 case TSK_Undeclared: 11424 return StrongLinkage; 11425 11426 case TSK_ExplicitSpecialization: 11427 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11428 VD->isStaticDataMember() 11429 ? 
GVA_StrongODR 11430 : StrongLinkage; 11431 11432 case TSK_ExplicitInstantiationDefinition: 11433 return GVA_StrongODR; 11434 11435 case TSK_ExplicitInstantiationDeclaration: 11436 return GVA_AvailableExternally; 11437 11438 case TSK_ImplicitInstantiation: 11439 return GVA_DiscardableODR; 11440 } 11441 11442 llvm_unreachable("Invalid Linkage!"); 11443 } 11444 11445 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { 11446 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11447 adjustGVALinkageForAttributes(*this, VD, 11448 basicGVALinkageForVariable(*this, VD))); 11449 } 11450 11451 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11452 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11453 if (!VD->isFileVarDecl()) 11454 return false; 11455 // Global named register variables (GNU extension) are never emitted. 11456 if (VD->getStorageClass() == SC_Register) 11457 return false; 11458 if (VD->getDescribedVarTemplate() || 11459 isa<VarTemplatePartialSpecializationDecl>(VD)) 11460 return false; 11461 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11462 // We never need to emit an uninstantiated function template. 11463 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11464 return false; 11465 } else if (isa<PragmaCommentDecl>(D)) 11466 return true; 11467 else if (isa<PragmaDetectMismatchDecl>(D)) 11468 return true; 11469 else if (isa<OMPRequiresDecl>(D)) 11470 return true; 11471 else if (isa<OMPThreadPrivateDecl>(D)) 11472 return !D->getDeclContext()->isDependentContext(); 11473 else if (isa<OMPAllocateDecl>(D)) 11474 return !D->getDeclContext()->isDependentContext(); 11475 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11476 return !D->getDeclContext()->isDependentContext(); 11477 else if (isa<ImportDecl>(D)) 11478 return true; 11479 else 11480 return false; 11481 11482 // If this is a member of a class template, we do not need to emit it. 11483 if (D->getDeclContext()->isDependentContext()) 11484 return false; 11485 11486 // Weak references don't produce any output by themselves. 11487 if (D->hasAttr<WeakRefAttr>()) 11488 return false; 11489 11490 // Aliases and used decls are required. 11491 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11492 return true; 11493 11494 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11495 // Forward declarations aren't required. 11496 if (!FD->doesThisDeclarationHaveABody()) 11497 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11498 11499 // Constructors and destructors are required. 11500 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11501 return true; 11502 11503 // The key function for a class is required. This rule only comes 11504 // into play when inline functions can be key functions, though. 11505 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11506 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11507 const CXXRecordDecl *RD = MD->getParent(); 11508 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11509 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11510 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11511 return true; 11512 } 11513 } 11514 } 11515 11516 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11517 11518 // static, static inline, always_inline, and extern inline functions can 11519 // always be deferred. Normal inline functions can be deferred in C99/C++. 11520 // Implicit template instantiations can also be deferred in C++. 
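    // All of those correspond to discardable GVA linkage, so only the strong
    // linkage kinds force emission here.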
11521 return !isDiscardableGVALinkage(Linkage); 11522 } 11523 11524 const auto *VD = cast<VarDecl>(D); 11525 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11526 11527 // If the decl is marked as `declare target to`, it should be emitted for the 11528 // host and for the device. 11529 if (LangOpts.OpenMP && 11530 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11531 return true; 11532 11533 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11534 !isMSStaticDataMemberInlineDefinition(VD)) 11535 return false; 11536 11537 // Variables that can be needed in other TUs are required. 11538 auto Linkage = GetGVALinkageForVariable(VD); 11539 if (!isDiscardableGVALinkage(Linkage)) 11540 return true; 11541 11542 // We never need to emit a variable that is available in another TU. 11543 if (Linkage == GVA_AvailableExternally) 11544 return false; 11545 11546 // Variables that have destruction with side-effects are required. 11547 if (VD->needsDestruction(*this)) 11548 return true; 11549 11550 // Variables that have initialization with side-effects are required. 11551 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11552 // We can get a value-dependent initializer during error recovery. 11553 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11554 return true; 11555 11556 // Likewise, variables with tuple-like bindings are required if their 11557 // bindings have side-effects. 11558 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11559 for (const auto *BD : DD->bindings()) 11560 if (const auto *BindingVD = BD->getHoldingVar()) 11561 if (DeclMustBeEmitted(BindingVD)) 11562 return true; 11563 11564 return false; 11565 } 11566 11567 void ASTContext::forEachMultiversionedFunctionVersion( 11568 const FunctionDecl *FD, 11569 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11570 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11571 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11572 FD = FD->getMostRecentDecl(); 11573 // FIXME: The order of traversal here matters and depends on the order of 11574 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11575 // shouldn't rely on that. 11576 for (auto *CurDecl : 11577 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11578 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11579 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11580 std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) { 11581 SeenDecls.insert(CurFD); 11582 Pred(CurFD); 11583 } 11584 } 11585 } 11586 11587 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11588 bool IsCXXMethod, 11589 bool IsBuiltin) const { 11590 // Pass through to the C++ ABI object 11591 if (IsCXXMethod) 11592 return ABI->getDefaultMethodCallConv(IsVariadic); 11593 11594 // Builtins ignore user-specified default calling convention and remain the 11595 // Target's default calling convention. 11596 if (!IsBuiltin) { 11597 switch (LangOpts.getDefaultCallingConv()) { 11598 case LangOptions::DCC_None: 11599 break; 11600 case LangOptions::DCC_CDecl: 11601 return CC_C; 11602 case LangOptions::DCC_FastCall: 11603 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11604 return CC_X86FastCall; 11605 break; 11606 case LangOptions::DCC_StdCall: 11607 if (!IsVariadic) 11608 return CC_X86StdCall; 11609 break; 11610 case LangOptions::DCC_VectorCall: 11611 // __vectorcall cannot be applied to variadic functions. 
11612 if (!IsVariadic) 11613 return CC_X86VectorCall; 11614 break; 11615 case LangOptions::DCC_RegCall: 11616 // __regcall cannot be applied to variadic functions. 11617 if (!IsVariadic) 11618 return CC_X86RegCall; 11619 break; 11620 } 11621 } 11622 return Target->getDefaultCallingConv(); 11623 } 11624 11625 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 11626 // Pass through to the C++ ABI object 11627 return ABI->isNearlyEmpty(RD); 11628 } 11629 11630 VTableContextBase *ASTContext::getVTableContext() { 11631 if (!VTContext.get()) { 11632 auto ABI = Target->getCXXABI(); 11633 if (ABI.isMicrosoft()) 11634 VTContext.reset(new MicrosoftVTableContext(*this)); 11635 else { 11636 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 11637 ? ItaniumVTableContext::Relative 11638 : ItaniumVTableContext::Pointer; 11639 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 11640 } 11641 } 11642 return VTContext.get(); 11643 } 11644 11645 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 11646 if (!T) 11647 T = Target; 11648 switch (T->getCXXABI().getKind()) { 11649 case TargetCXXABI::AppleARM64: 11650 case TargetCXXABI::Fuchsia: 11651 case TargetCXXABI::GenericAArch64: 11652 case TargetCXXABI::GenericItanium: 11653 case TargetCXXABI::GenericARM: 11654 case TargetCXXABI::GenericMIPS: 11655 case TargetCXXABI::iOS: 11656 case TargetCXXABI::WebAssembly: 11657 case TargetCXXABI::WatchOS: 11658 case TargetCXXABI::XL: 11659 return ItaniumMangleContext::create(*this, getDiagnostics()); 11660 case TargetCXXABI::Microsoft: 11661 return MicrosoftMangleContext::create(*this, getDiagnostics()); 11662 } 11663 llvm_unreachable("Unsupported ABI"); 11664 } 11665 11666 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 11667 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 11668 "Device mangle context does not support Microsoft mangling."); 11669 switch (T.getCXXABI().getKind()) { 11670 case TargetCXXABI::AppleARM64: 11671 case TargetCXXABI::Fuchsia: 11672 case TargetCXXABI::GenericAArch64: 11673 case TargetCXXABI::GenericItanium: 11674 case TargetCXXABI::GenericARM: 11675 case TargetCXXABI::GenericMIPS: 11676 case TargetCXXABI::iOS: 11677 case TargetCXXABI::WebAssembly: 11678 case TargetCXXABI::WatchOS: 11679 case TargetCXXABI::XL: 11680 return ItaniumMangleContext::create( 11681 *this, getDiagnostics(), 11682 [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> { 11683 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 11684 return RD->getDeviceLambdaManglingNumber(); 11685 return llvm::None; 11686 }); 11687 case TargetCXXABI::Microsoft: 11688 return MicrosoftMangleContext::create(*this, getDiagnostics()); 11689 } 11690 llvm_unreachable("Unsupported ABI"); 11691 } 11692 11693 CXXABI::~CXXABI() = default; 11694 11695 size_t ASTContext::getSideTableAllocatedMemory() const { 11696 return ASTRecordLayouts.getMemorySize() + 11697 llvm::capacity_in_bytes(ObjCLayouts) + 11698 llvm::capacity_in_bytes(KeyFunctions) + 11699 llvm::capacity_in_bytes(ObjCImpls) + 11700 llvm::capacity_in_bytes(BlockVarCopyInits) + 11701 llvm::capacity_in_bytes(DeclAttrs) + 11702 llvm::capacity_in_bytes(TemplateOrInstantiation) + 11703 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 11704 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 11705 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 11706 llvm::capacity_in_bytes(OverriddenMethods) + 11707 llvm::capacity_in_bytes(Types) + 11708 
         llvm::capacity_in_bytes(VariableArrayTypes);
}

/// getIntTypeForBitwidth -
/// Returns the integer QualType matching the specified bitwidth and
/// signedness. Returns an empty type if there is no appropriate target type.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
  if (!QualTy && DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy;
}

/// getRealTypeForBitwidth -
/// Returns the floating-point QualType matching the specified bitwidth.
/// Returns an empty type if there is no appropriate target type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    return {};
  }

  llvm_unreachable("Unhandled FloatModeKind value");
}

void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number > 1)
    MangleNumbers[ND] = Number;
}

unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
  auto I = MangleNumbers.find(ND);
  return I != MangleNumbers.end() ? I->second : 1;
}

void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number > 1)
    StaticLocalNumbers[VD] = Number;
}

unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
  return I != StaticLocalNumbers.end() ? I->second : 1;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
11781 std::unique_ptr<MangleNumberingContext> &MCtx = 11782 ExtraMangleNumberingContexts[D]; 11783 if (!MCtx) 11784 MCtx = createMangleNumberingContext(); 11785 return *MCtx; 11786 } 11787 11788 std::unique_ptr<MangleNumberingContext> 11789 ASTContext::createMangleNumberingContext() const { 11790 return ABI->createMangleNumberingContext(); 11791 } 11792 11793 const CXXConstructorDecl * 11794 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 11795 return ABI->getCopyConstructorForExceptionObject( 11796 cast<CXXRecordDecl>(RD->getFirstDecl())); 11797 } 11798 11799 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 11800 CXXConstructorDecl *CD) { 11801 return ABI->addCopyConstructorForExceptionObject( 11802 cast<CXXRecordDecl>(RD->getFirstDecl()), 11803 cast<CXXConstructorDecl>(CD->getFirstDecl())); 11804 } 11805 11806 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 11807 TypedefNameDecl *DD) { 11808 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 11809 } 11810 11811 TypedefNameDecl * 11812 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 11813 return ABI->getTypedefNameForUnnamedTagDecl(TD); 11814 } 11815 11816 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 11817 DeclaratorDecl *DD) { 11818 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 11819 } 11820 11821 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 11822 return ABI->getDeclaratorForUnnamedTagDecl(TD); 11823 } 11824 11825 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 11826 ParamIndices[D] = index; 11827 } 11828 11829 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 11830 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 11831 assert(I != ParamIndices.end() && 11832 "ParmIndices lacks entry set by ParmVarDecl"); 11833 return I->second; 11834 } 11835 11836 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 11837 unsigned Length) const { 11838 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 11839 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 11840 EltTy = EltTy.withConst(); 11841 11842 EltTy = adjustStringLiteralBaseType(EltTy); 11843 11844 // Get an array type for the string, according to C99 6.4.5. This includes 11845 // the null terminator character. 
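  // For example, "foo" (Length == 3) yields char[4] in C and const char[4] in
  // C++ (or under the ConstStrings language option).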
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArrayType::Normal, /*IndexTypeQuals*/ 0);
}

StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        *this, Key, StringLiteral::Ascii,
        /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
        SourceLocation());
  return Result;
}

MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
  MSGuidDecls.InsertNode(New, InsertPos);
  return New;
}

TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
  TemplateParamObjectDecls.InsertNode(New, InsertPos);
  return New;
}

bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}

bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
11921 if (MethodDecl->hasAttr<UnavailableAttr>() 11922 || MethodDecl->hasAttr<DeprecatedAttr>()) 11923 return false; 11924 if (MethodDecl->getObjCDeclQualifier() != 11925 MethodImpl->getObjCDeclQualifier()) 11926 return false; 11927 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 11928 return false; 11929 11930 if (MethodDecl->param_size() != MethodImpl->param_size()) 11931 return false; 11932 11933 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 11934 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 11935 EF = MethodDecl->param_end(); 11936 IM != EM && IF != EF; ++IM, ++IF) { 11937 const ParmVarDecl *DeclVar = (*IF); 11938 const ParmVarDecl *ImplVar = (*IM); 11939 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 11940 return false; 11941 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 11942 return false; 11943 } 11944 11945 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 11946 } 11947 11948 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 11949 LangAS AS; 11950 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 11951 AS = LangAS::Default; 11952 else 11953 AS = QT->getPointeeType().getAddressSpace(); 11954 11955 return getTargetInfo().getNullPointerValue(AS); 11956 } 11957 11958 unsigned ASTContext::getTargetAddressSpace(QualType T) const { 11959 return T->isFunctionType() ? getTargetInfo().getProgramAddressSpace() 11960 : getTargetAddressSpace(T.getQualifiers()); 11961 } 11962 11963 unsigned ASTContext::getTargetAddressSpace(Qualifiers Q) const { 11964 return getTargetAddressSpace(Q.getAddressSpace()); 11965 } 11966 11967 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 11968 if (isTargetAddressSpace(AS)) 11969 return toTargetAddressSpace(AS); 11970 else 11971 return (*AddrSpaceMap)[(unsigned)AS]; 11972 } 11973 11974 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 11975 assert(Ty->isFixedPointType()); 11976 11977 if (Ty->isSaturatedFixedPointType()) return Ty; 11978 11979 switch (Ty->castAs<BuiltinType>()->getKind()) { 11980 default: 11981 llvm_unreachable("Not a fixed point type!"); 11982 case BuiltinType::ShortAccum: 11983 return SatShortAccumTy; 11984 case BuiltinType::Accum: 11985 return SatAccumTy; 11986 case BuiltinType::LongAccum: 11987 return SatLongAccumTy; 11988 case BuiltinType::UShortAccum: 11989 return SatUnsignedShortAccumTy; 11990 case BuiltinType::UAccum: 11991 return SatUnsignedAccumTy; 11992 case BuiltinType::ULongAccum: 11993 return SatUnsignedLongAccumTy; 11994 case BuiltinType::ShortFract: 11995 return SatShortFractTy; 11996 case BuiltinType::Fract: 11997 return SatFractTy; 11998 case BuiltinType::LongFract: 11999 return SatLongFractTy; 12000 case BuiltinType::UShortFract: 12001 return SatUnsignedShortFractTy; 12002 case BuiltinType::UFract: 12003 return SatUnsignedFractTy; 12004 case BuiltinType::ULongFract: 12005 return SatUnsignedLongFractTy; 12006 } 12007 } 12008 12009 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 12010 if (LangOpts.OpenCL) 12011 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 12012 12013 if (LangOpts.CUDA) 12014 return getTargetInfo().getCUDABuiltinAddressSpace(AS); 12015 12016 return getLangASFromTargetAS(AS); 12017 } 12018 12019 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 12020 // doesn't include ASTContext.h 12021 template 12022 clang::LazyGenerationalUpdatePtr< 12023 const Decl *, Decl *, 
&ExternalASTSource::CompleteRedeclChain>::ValueType 12024 clang::LazyGenerationalUpdatePtr< 12025 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 12026 const clang::ASTContext &Ctx, Decl *Value); 12027 12028 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 12029 assert(Ty->isFixedPointType()); 12030 12031 const TargetInfo &Target = getTargetInfo(); 12032 switch (Ty->castAs<BuiltinType>()->getKind()) { 12033 default: 12034 llvm_unreachable("Not a fixed point type!"); 12035 case BuiltinType::ShortAccum: 12036 case BuiltinType::SatShortAccum: 12037 return Target.getShortAccumScale(); 12038 case BuiltinType::Accum: 12039 case BuiltinType::SatAccum: 12040 return Target.getAccumScale(); 12041 case BuiltinType::LongAccum: 12042 case BuiltinType::SatLongAccum: 12043 return Target.getLongAccumScale(); 12044 case BuiltinType::UShortAccum: 12045 case BuiltinType::SatUShortAccum: 12046 return Target.getUnsignedShortAccumScale(); 12047 case BuiltinType::UAccum: 12048 case BuiltinType::SatUAccum: 12049 return Target.getUnsignedAccumScale(); 12050 case BuiltinType::ULongAccum: 12051 case BuiltinType::SatULongAccum: 12052 return Target.getUnsignedLongAccumScale(); 12053 case BuiltinType::ShortFract: 12054 case BuiltinType::SatShortFract: 12055 return Target.getShortFractScale(); 12056 case BuiltinType::Fract: 12057 case BuiltinType::SatFract: 12058 return Target.getFractScale(); 12059 case BuiltinType::LongFract: 12060 case BuiltinType::SatLongFract: 12061 return Target.getLongFractScale(); 12062 case BuiltinType::UShortFract: 12063 case BuiltinType::SatUShortFract: 12064 return Target.getUnsignedShortFractScale(); 12065 case BuiltinType::UFract: 12066 case BuiltinType::SatUFract: 12067 return Target.getUnsignedFractScale(); 12068 case BuiltinType::ULongFract: 12069 case BuiltinType::SatULongFract: 12070 return Target.getUnsignedLongFractScale(); 12071 } 12072 } 12073 12074 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 12075 assert(Ty->isFixedPointType()); 12076 12077 const TargetInfo &Target = getTargetInfo(); 12078 switch (Ty->castAs<BuiltinType>()->getKind()) { 12079 default: 12080 llvm_unreachable("Not a fixed point type!"); 12081 case BuiltinType::ShortAccum: 12082 case BuiltinType::SatShortAccum: 12083 return Target.getShortAccumIBits(); 12084 case BuiltinType::Accum: 12085 case BuiltinType::SatAccum: 12086 return Target.getAccumIBits(); 12087 case BuiltinType::LongAccum: 12088 case BuiltinType::SatLongAccum: 12089 return Target.getLongAccumIBits(); 12090 case BuiltinType::UShortAccum: 12091 case BuiltinType::SatUShortAccum: 12092 return Target.getUnsignedShortAccumIBits(); 12093 case BuiltinType::UAccum: 12094 case BuiltinType::SatUAccum: 12095 return Target.getUnsignedAccumIBits(); 12096 case BuiltinType::ULongAccum: 12097 case BuiltinType::SatULongAccum: 12098 return Target.getUnsignedLongAccumIBits(); 12099 case BuiltinType::ShortFract: 12100 case BuiltinType::SatShortFract: 12101 case BuiltinType::Fract: 12102 case BuiltinType::SatFract: 12103 case BuiltinType::LongFract: 12104 case BuiltinType::SatLongFract: 12105 case BuiltinType::UShortFract: 12106 case BuiltinType::SatUShortFract: 12107 case BuiltinType::UFract: 12108 case BuiltinType::SatUFract: 12109 case BuiltinType::ULongFract: 12110 case BuiltinType::SatULongFract: 12111 return 0; 12112 } 12113 } 12114 12115 llvm::FixedPointSemantics 12116 ASTContext::getFixedPointSemantics(QualType Ty) const { 12117 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 12118 "Can only 
get the fixed point semantics for a " 12119 "fixed point or integer type."); 12120 if (Ty->isIntegerType()) 12121 return llvm::FixedPointSemantics::GetIntegerSemantics( 12122 getIntWidth(Ty), Ty->isSignedIntegerType()); 12123 12124 bool isSigned = Ty->isSignedFixedPointType(); 12125 return llvm::FixedPointSemantics( 12126 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 12127 Ty->isSaturatedFixedPointType(), 12128 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 12129 } 12130 12131 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 12132 assert(Ty->isFixedPointType()); 12133 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 12134 } 12135 12136 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 12137 assert(Ty->isFixedPointType()); 12138 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 12139 } 12140 12141 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 12142 assert(Ty->isUnsignedFixedPointType() && 12143 "Expected unsigned fixed point type"); 12144 12145 switch (Ty->castAs<BuiltinType>()->getKind()) { 12146 case BuiltinType::UShortAccum: 12147 return ShortAccumTy; 12148 case BuiltinType::UAccum: 12149 return AccumTy; 12150 case BuiltinType::ULongAccum: 12151 return LongAccumTy; 12152 case BuiltinType::SatUShortAccum: 12153 return SatShortAccumTy; 12154 case BuiltinType::SatUAccum: 12155 return SatAccumTy; 12156 case BuiltinType::SatULongAccum: 12157 return SatLongAccumTy; 12158 case BuiltinType::UShortFract: 12159 return ShortFractTy; 12160 case BuiltinType::UFract: 12161 return FractTy; 12162 case BuiltinType::ULongFract: 12163 return LongFractTy; 12164 case BuiltinType::SatUShortFract: 12165 return SatShortFractTy; 12166 case BuiltinType::SatUFract: 12167 return SatFractTy; 12168 case BuiltinType::SatULongFract: 12169 return SatLongFractTy; 12170 default: 12171 llvm_unreachable("Unexpected unsigned fixed point type"); 12172 } 12173 } 12174 12175 ParsedTargetAttr 12176 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 12177 assert(TD != nullptr); 12178 ParsedTargetAttr ParsedAttr = TD->parse(); 12179 12180 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 12181 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 12182 }); 12183 return ParsedAttr; 12184 } 12185 12186 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 12187 const FunctionDecl *FD) const { 12188 if (FD) 12189 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 12190 else 12191 Target->initFeatureMap(FeatureMap, getDiagnostics(), 12192 Target->getTargetOpts().CPU, 12193 Target->getTargetOpts().Features); 12194 } 12195 12196 // Fills in the supplied string map with the set of target features for the 12197 // passed in function. 12198 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 12199 GlobalDecl GD) const { 12200 StringRef TargetCPU = Target->getTargetOpts().CPU; 12201 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 12202 if (const auto *TD = FD->getAttr<TargetAttr>()) { 12203 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 12204 12205 // Make a copy of the features as passed on the command line into the 12206 // beginning of the additional features from the function to override. 
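    // The command-line features go first so that, assuming later entries win
    // when the feature map is populated below, the attribute's features take
    // precedence.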
    ParsedAttr.Features.insert(
        ParsedAttr.Features.begin(),
        Target->getTargetOpts().FeaturesAsWritten.begin(),
        Target->getTargetOpts().FeaturesAsWritten.end());

    if (ParsedAttr.Architecture != "" &&
        Target->isValidCPUName(ParsedAttr.Architecture))
      TargetCPU = ParsedAttr.Architecture;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll
    // use the passed-in features (FeaturesAsWritten) along with the new ones
    // from the attribute.
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    std::vector<std::string> Features;
    StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
    if (VersionStr.startswith("arch="))
      TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
    else if (VersionStr != "default")
      Features.push_back((StringRef{"+"} + VersionStr).str());

    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
  bool IsStaticVar =
      isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: static managed variables need to be externalized since they are
  // declarations in IR and therefore cannot have internal linkage. Kernels in
  // an anonymous namespace need to be externalized to avoid duplicate symbols.
  return (IsStaticVar &&
          (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
         (D->hasAttr<CUDAGlobalAttr>() &&
          basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) ==
              GVA_Internal);
}

bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
  return mayExternalizeStaticVar(D) &&
         (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
          CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}

StringRef ASTContext::getCUIDHash() const {
  if (!CUIDHash.empty())
    return CUIDHash;
  if (LangOpts.CUID.empty())
    return StringRef();
  CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
  return CUIDHash;
}