//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
static SourceLocation getDeclLocForCommentSearch(const Decl *D,
                                                 SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation || TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    return D->getBeginLoc();

  const SourceLocation DeclLoc = D->getLocation();
  if (DeclLoc.isMacroID()) {
    if (isa<TypedefDecl>(D)) {
      // If the location of the typedef name is inside a macro, the typedef is
      // probably being declared via a macro. Try using the declaration's
      // starting location as the "declaration location".
      return D->getBeginLoc();
    }

    if (const auto *TD = dyn_cast<TagDecl>(D)) {
      // If location of the tag decl is inside a macro, but the spelling of
      // the tag name comes from a macro argument, it looks like a special
      // macro like NS_ENUM is being used to define the tag decl. In that
      // case, adjust the source location to the expansion loc so that we can
      // attach the comment to the tag decl.
      if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition())
        return SourceMgr.getExpansionLoc(DeclLoc);
    }
  }

  return DeclLoc;
}

RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
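  // Illustrative (hypothetical) example: any of the characters checked below
  // appearing between the comment and the declaration breaks the association,
  // e.g. an intervening preprocessor directive:
  //
  //   /// Doc comment.
  //   #include "other.h"   // the '#' here prevents attaching the comment
  //   void f();            // ...to this declaration.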
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return nullptr;

  if (ExternalSource && !CommentsLoaded) {
    ExternalSource->ReadComments();
    CommentsLoaded = true;
  }

  if (Comments.empty())
    return nullptr;

  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty())
    return nullptr;

  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
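    // (Hypothetical illustration: given
    //    template <typename T> struct Outer { struct Inner {}; };
    //  the definition of Outer<int>::Inner is instantiated from the member
    //  Outer<T>::Inner, and that member pattern is where any documentation
    //  would have been written.)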
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D, const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalidated.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
    if (LookupRes != CommentlessRedeclChains.end())
      return LookupRes->second;
    return nullptr;
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
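    // (For example, a method or property accessor declared in the primary
    //  @interface may be redeclared in a class extension; such a
    //  redeclaration may be the one that carries the documentation.)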
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.

  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (DeclRawComments.count(D) > 0)
      continue;

    if (RawComment *const DocComment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
      cacheRawCommentForDecl(*D, *DocComment);
      comments::FullComment *FC = DocComment->parse(*this, PP, D);
      ParsedComments[D->getCanonicalDecl()] = FC;
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(), ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D, const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    } else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    } else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    } else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    } else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(NonVirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(VirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to other redeclaration of this Decl, we
  // should parse the comment in context of that other Decl.
This is important 654 // because comments can contain references to parameter names which can be 655 // different across redeclarations. 656 if (D != OriginalDecl && OriginalDecl) 657 return getCommentForDecl(OriginalDecl, PP); 658 659 comments::FullComment *FC = RC->parse(*this, PP, D); 660 ParsedComments[Canonical] = FC; 661 return FC; 662 } 663 664 void 665 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, 666 const ASTContext &C, 667 TemplateTemplateParmDecl *Parm) { 668 ID.AddInteger(Parm->getDepth()); 669 ID.AddInteger(Parm->getPosition()); 670 ID.AddBoolean(Parm->isParameterPack()); 671 672 TemplateParameterList *Params = Parm->getTemplateParameters(); 673 ID.AddInteger(Params->size()); 674 for (TemplateParameterList::const_iterator P = Params->begin(), 675 PEnd = Params->end(); 676 P != PEnd; ++P) { 677 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { 678 ID.AddInteger(0); 679 ID.AddBoolean(TTP->isParameterPack()); 680 const TypeConstraint *TC = TTP->getTypeConstraint(); 681 ID.AddBoolean(TC != nullptr); 682 if (TC) 683 TC->getImmediatelyDeclaredConstraint()->Profile(ID, C, 684 /*Canonical=*/true); 685 if (TTP->isExpandedParameterPack()) { 686 ID.AddBoolean(true); 687 ID.AddInteger(TTP->getNumExpansionParameters()); 688 } else 689 ID.AddBoolean(false); 690 continue; 691 } 692 693 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { 694 ID.AddInteger(1); 695 ID.AddBoolean(NTTP->isParameterPack()); 696 ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr()); 697 if (NTTP->isExpandedParameterPack()) { 698 ID.AddBoolean(true); 699 ID.AddInteger(NTTP->getNumExpansionTypes()); 700 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { 701 QualType T = NTTP->getExpansionType(I); 702 ID.AddPointer(T.getCanonicalType().getAsOpaquePtr()); 703 } 704 } else 705 ID.AddBoolean(false); 706 continue; 707 } 708 709 auto *TTP = cast<TemplateTemplateParmDecl>(*P); 710 ID.AddInteger(2); 711 Profile(ID, C, TTP); 712 } 713 Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause(); 714 ID.AddBoolean(RequiresClause != nullptr); 715 if (RequiresClause) 716 RequiresClause->Profile(ID, C, /*Canonical=*/true); 717 } 718 719 static Expr * 720 canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC, 721 QualType ConstrainedType) { 722 // This is a bit ugly - we need to form a new immediately-declared 723 // constraint that references the new parameter; this would ideally 724 // require semantic analysis (e.g. template<C T> struct S {}; - the 725 // converted arguments of C<T> could be an argument pack if C is 726 // declared as template<typename... T> concept C = ...). 727 // We don't have semantic analysis here so we dig deep into the 728 // ready-made constraint expr and change the thing manually. 729 ConceptSpecializationExpr *CSE; 730 if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC)) 731 CSE = cast<ConceptSpecializationExpr>(Fold->getLHS()); 732 else 733 CSE = cast<ConceptSpecializationExpr>(IDC); 734 ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments(); 735 SmallVector<TemplateArgument, 3> NewConverted; 736 NewConverted.reserve(OldConverted.size()); 737 if (OldConverted.front().getKind() == TemplateArgument::Pack) { 738 // The case: 739 // template<typename... 
T> concept C = true; 740 // template<C<int> T> struct S; -> constraint is C<{T, int}> 741 NewConverted.push_back(ConstrainedType); 742 llvm::append_range(NewConverted, 743 OldConverted.front().pack_elements().drop_front(1)); 744 TemplateArgument NewPack(NewConverted); 745 746 NewConverted.clear(); 747 NewConverted.push_back(NewPack); 748 assert(OldConverted.size() == 1 && 749 "Template parameter pack should be the last parameter"); 750 } else { 751 assert(OldConverted.front().getKind() == TemplateArgument::Type && 752 "Unexpected first argument kind for immediately-declared " 753 "constraint"); 754 NewConverted.push_back(ConstrainedType); 755 llvm::append_range(NewConverted, OldConverted.drop_front(1)); 756 } 757 Expr *NewIDC = ConceptSpecializationExpr::Create( 758 C, CSE->getNamedConcept(), NewConverted, nullptr, 759 CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack()); 760 761 if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC)) 762 NewIDC = new (C) CXXFoldExpr( 763 OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC, 764 BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr, 765 SourceLocation(), /*NumExpansions=*/None); 766 return NewIDC; 767 } 768 769 TemplateTemplateParmDecl * 770 ASTContext::getCanonicalTemplateTemplateParmDecl( 771 TemplateTemplateParmDecl *TTP) const { 772 // Check if we already have a canonical template template parameter. 773 llvm::FoldingSetNodeID ID; 774 CanonicalTemplateTemplateParm::Profile(ID, *this, TTP); 775 void *InsertPos = nullptr; 776 CanonicalTemplateTemplateParm *Canonical 777 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); 778 if (Canonical) 779 return Canonical->getParam(); 780 781 // Build a canonical template parameter list. 782 TemplateParameterList *Params = TTP->getTemplateParameters(); 783 SmallVector<NamedDecl *, 4> CanonParams; 784 CanonParams.reserve(Params->size()); 785 for (TemplateParameterList::const_iterator P = Params->begin(), 786 PEnd = Params->end(); 787 P != PEnd; ++P) { 788 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { 789 TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this, 790 getTranslationUnitDecl(), SourceLocation(), SourceLocation(), 791 TTP->getDepth(), TTP->getIndex(), nullptr, false, 792 TTP->isParameterPack(), TTP->hasTypeConstraint(), 793 TTP->isExpandedParameterPack() ? 
794 llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None); 795 if (const auto *TC = TTP->getTypeConstraint()) { 796 QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0); 797 Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint( 798 *this, TC->getImmediatelyDeclaredConstraint(), 799 ParamAsArgument); 800 TemplateArgumentListInfo CanonArgsAsWritten; 801 if (auto *Args = TC->getTemplateArgsAsWritten()) 802 for (const auto &ArgLoc : Args->arguments()) 803 CanonArgsAsWritten.addArgument( 804 TemplateArgumentLoc(ArgLoc.getArgument(), 805 TemplateArgumentLocInfo())); 806 NewTTP->setTypeConstraint( 807 NestedNameSpecifierLoc(), 808 DeclarationNameInfo(TC->getNamedConcept()->getDeclName(), 809 SourceLocation()), /*FoundDecl=*/nullptr, 810 // Actually canonicalizing a TemplateArgumentLoc is difficult so we 811 // simply omit the ArgsAsWritten 812 TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC); 813 } 814 CanonParams.push_back(NewTTP); 815 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { 816 QualType T = getCanonicalType(NTTP->getType()); 817 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 818 NonTypeTemplateParmDecl *Param; 819 if (NTTP->isExpandedParameterPack()) { 820 SmallVector<QualType, 2> ExpandedTypes; 821 SmallVector<TypeSourceInfo *, 2> ExpandedTInfos; 822 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { 823 ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I))); 824 ExpandedTInfos.push_back( 825 getTrivialTypeSourceInfo(ExpandedTypes.back())); 826 } 827 828 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 829 SourceLocation(), 830 SourceLocation(), 831 NTTP->getDepth(), 832 NTTP->getPosition(), nullptr, 833 T, 834 TInfo, 835 ExpandedTypes, 836 ExpandedTInfos); 837 } else { 838 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 839 SourceLocation(), 840 SourceLocation(), 841 NTTP->getDepth(), 842 NTTP->getPosition(), nullptr, 843 T, 844 NTTP->isParameterPack(), 845 TInfo); 846 } 847 if (AutoType *AT = T->getContainedAutoType()) { 848 if (AT->isConstrained()) { 849 Param->setPlaceholderTypeConstraint( 850 canonicalizeImmediatelyDeclaredConstraint( 851 *this, NTTP->getPlaceholderTypeConstraint(), T)); 852 } 853 } 854 CanonParams.push_back(Param); 855 856 } else 857 CanonParams.push_back(getCanonicalTemplateTemplateParmDecl( 858 cast<TemplateTemplateParmDecl>(*P))); 859 } 860 861 Expr *CanonRequiresClause = nullptr; 862 if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause()) 863 CanonRequiresClause = RequiresClause; 864 865 TemplateTemplateParmDecl *CanonTTP 866 = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 867 SourceLocation(), TTP->getDepth(), 868 TTP->getPosition(), 869 TTP->isParameterPack(), 870 nullptr, 871 TemplateParameterList::Create(*this, SourceLocation(), 872 SourceLocation(), 873 CanonParams, 874 SourceLocation(), 875 CanonRequiresClause)); 876 877 // Get the new insert position for the node we care about. 878 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); 879 assert(!Canonical && "Shouldn't be in the map!"); 880 (void)Canonical; 881 882 // Create the canonical template template parameter entry. 
883 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP); 884 CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos); 885 return CanonTTP; 886 } 887 888 TargetCXXABI::Kind ASTContext::getCXXABIKind() const { 889 auto Kind = getTargetInfo().getCXXABI().getKind(); 890 return getLangOpts().CXXABI.value_or(Kind); 891 } 892 893 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) { 894 if (!LangOpts.CPlusPlus) return nullptr; 895 896 switch (getCXXABIKind()) { 897 case TargetCXXABI::AppleARM64: 898 case TargetCXXABI::Fuchsia: 899 case TargetCXXABI::GenericARM: // Same as Itanium at this level 900 case TargetCXXABI::iOS: 901 case TargetCXXABI::WatchOS: 902 case TargetCXXABI::GenericAArch64: 903 case TargetCXXABI::GenericMIPS: 904 case TargetCXXABI::GenericItanium: 905 case TargetCXXABI::WebAssembly: 906 case TargetCXXABI::XL: 907 return CreateItaniumCXXABI(*this); 908 case TargetCXXABI::Microsoft: 909 return CreateMicrosoftCXXABI(*this); 910 } 911 llvm_unreachable("Invalid CXXABI type!"); 912 } 913 914 interp::Context &ASTContext::getInterpContext() { 915 if (!InterpContext) { 916 InterpContext.reset(new interp::Context(*this)); 917 } 918 return *InterpContext.get(); 919 } 920 921 ParentMapContext &ASTContext::getParentMapContext() { 922 if (!ParentMapCtx) 923 ParentMapCtx.reset(new ParentMapContext(*this)); 924 return *ParentMapCtx.get(); 925 } 926 927 static const LangASMap *getAddressSpaceMap(const TargetInfo &T, 928 const LangOptions &LOpts) { 929 if (LOpts.FakeAddressSpaceMap) { 930 // The fake address space map must have a distinct entry for each 931 // language-specific address space. 932 static const unsigned FakeAddrSpaceMap[] = { 933 0, // Default 934 1, // opencl_global 935 3, // opencl_local 936 2, // opencl_constant 937 0, // opencl_private 938 4, // opencl_generic 939 5, // opencl_global_device 940 6, // opencl_global_host 941 7, // cuda_device 942 8, // cuda_constant 943 9, // cuda_shared 944 1, // sycl_global 945 5, // sycl_global_device 946 6, // sycl_global_host 947 3, // sycl_local 948 0, // sycl_private 949 10, // ptr32_sptr 950 11, // ptr32_uptr 951 12 // ptr64 952 }; 953 return &FakeAddrSpaceMap; 954 } else { 955 return &T.getAddressSpaceMap(); 956 } 957 } 958 959 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, 960 const LangOptions &LangOpts) { 961 switch (LangOpts.getAddressSpaceMapMangling()) { 962 case LangOptions::ASMM_Target: 963 return TI.useAddressSpaceMapMangling(); 964 case LangOptions::ASMM_On: 965 return true; 966 case LangOptions::ASMM_Off: 967 return false; 968 } 969 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything."); 970 } 971 972 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, 973 IdentifierTable &idents, SelectorTable &sels, 974 Builtin::Context &builtins, TranslationUnitKind TUKind) 975 : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize), 976 FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize), 977 TemplateSpecializationTypes(this_()), 978 DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()), 979 SubstTemplateTemplateParmPacks(this_()), 980 CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts), 981 NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)), 982 XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles, 983 LangOpts.XRayNeverInstrumentFiles, 984 LangOpts.XRayAttrListFiles, SM)), 985 ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)), 986 PrintingPolicy(LOpts), Idents(idents), Selectors(sels), 987 
BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this), 988 Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), 989 CompCategories(this_()), LastSDM(nullptr, 0) { 990 addTranslationUnitDecl(); 991 } 992 993 void ASTContext::cleanup() { 994 // Release the DenseMaps associated with DeclContext objects. 995 // FIXME: Is this the ideal solution? 996 ReleaseDeclContextMaps(); 997 998 // Call all of the deallocation functions on all of their targets. 999 for (auto &Pair : Deallocations) 1000 (Pair.first)(Pair.second); 1001 Deallocations.clear(); 1002 1003 // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed 1004 // because they can contain DenseMaps. 1005 for (llvm::DenseMap<const ObjCContainerDecl*, 1006 const ASTRecordLayout*>::iterator 1007 I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; ) 1008 // Increment in loop to prevent using deallocated memory. 1009 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second)) 1010 R->Destroy(*this); 1011 ObjCLayouts.clear(); 1012 1013 for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator 1014 I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) { 1015 // Increment in loop to prevent using deallocated memory. 1016 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second)) 1017 R->Destroy(*this); 1018 } 1019 ASTRecordLayouts.clear(); 1020 1021 for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(), 1022 AEnd = DeclAttrs.end(); 1023 A != AEnd; ++A) 1024 A->second->~AttrVec(); 1025 DeclAttrs.clear(); 1026 1027 for (const auto &Value : ModuleInitializers) 1028 Value.second->~PerModuleInitializers(); 1029 ModuleInitializers.clear(); 1030 } 1031 1032 ASTContext::~ASTContext() { cleanup(); } 1033 1034 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) { 1035 TraversalScope = TopLevelDecls; 1036 getParentMapContext().clear(); 1037 } 1038 1039 void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const { 1040 Deallocations.push_back({Callback, Data}); 1041 } 1042 1043 void 1044 ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) { 1045 ExternalSource = std::move(Source); 1046 } 1047 1048 void ASTContext::PrintStats() const { 1049 llvm::errs() << "\n*** AST Context Stats:\n"; 1050 llvm::errs() << " " << Types.size() << " types total.\n"; 1051 1052 unsigned counts[] = { 1053 #define TYPE(Name, Parent) 0, 1054 #define ABSTRACT_TYPE(Name, Parent) 1055 #include "clang/AST/TypeNodes.inc" 1056 0 // Extra 1057 }; 1058 1059 for (unsigned i = 0, e = Types.size(); i != e; ++i) { 1060 Type *T = Types[i]; 1061 counts[(unsigned)T->getTypeClass()]++; 1062 } 1063 1064 unsigned Idx = 0; 1065 unsigned TotalBytes = 0; 1066 #define TYPE(Name, Parent) \ 1067 if (counts[Idx]) \ 1068 llvm::errs() << " " << counts[Idx] << " " << #Name \ 1069 << " types, " << sizeof(Name##Type) << " each " \ 1070 << "(" << counts[Idx] * sizeof(Name##Type) \ 1071 << " bytes)\n"; \ 1072 TotalBytes += counts[Idx] * sizeof(Name##Type); \ 1073 ++Idx; 1074 #define ABSTRACT_TYPE(Name, Parent) 1075 #include "clang/AST/TypeNodes.inc" 1076 1077 llvm::errs() << "Total bytes = " << TotalBytes << "\n"; 1078 1079 // Implicit special member functions. 
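  // Each counter pair below prints roughly "<declarations actually built> /
  // <implicit members counted>", e.g. a line such as
  //   "12/345 implicit default constructors created"
  // (the numbers here are made up purely for illustration).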
1080 llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/" 1081 << NumImplicitDefaultConstructors 1082 << " implicit default constructors created\n"; 1083 llvm::errs() << NumImplicitCopyConstructorsDeclared << "/" 1084 << NumImplicitCopyConstructors 1085 << " implicit copy constructors created\n"; 1086 if (getLangOpts().CPlusPlus) 1087 llvm::errs() << NumImplicitMoveConstructorsDeclared << "/" 1088 << NumImplicitMoveConstructors 1089 << " implicit move constructors created\n"; 1090 llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/" 1091 << NumImplicitCopyAssignmentOperators 1092 << " implicit copy assignment operators created\n"; 1093 if (getLangOpts().CPlusPlus) 1094 llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/" 1095 << NumImplicitMoveAssignmentOperators 1096 << " implicit move assignment operators created\n"; 1097 llvm::errs() << NumImplicitDestructorsDeclared << "/" 1098 << NumImplicitDestructors 1099 << " implicit destructors created\n"; 1100 1101 if (ExternalSource) { 1102 llvm::errs() << "\n"; 1103 ExternalSource->PrintStats(); 1104 } 1105 1106 BumpAlloc.PrintStats(); 1107 } 1108 1109 void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M, 1110 bool NotifyListeners) { 1111 if (NotifyListeners) 1112 if (auto *Listener = getASTMutationListener()) 1113 Listener->RedefinedHiddenDefinition(ND, M); 1114 1115 MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M); 1116 } 1117 1118 void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) { 1119 auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl())); 1120 if (It == MergedDefModules.end()) 1121 return; 1122 1123 auto &Merged = It->second; 1124 llvm::DenseSet<Module*> Found; 1125 for (Module *&M : Merged) 1126 if (!Found.insert(M).second) 1127 M = nullptr; 1128 llvm::erase_value(Merged, nullptr); 1129 } 1130 1131 ArrayRef<Module *> 1132 ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) { 1133 auto MergedIt = 1134 MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl())); 1135 if (MergedIt == MergedDefModules.end()) 1136 return None; 1137 return MergedIt->second; 1138 } 1139 1140 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) { 1141 if (LazyInitializers.empty()) 1142 return; 1143 1144 auto *Source = Ctx.getExternalSource(); 1145 assert(Source && "lazy initializers but no external source"); 1146 1147 auto LazyInits = std::move(LazyInitializers); 1148 LazyInitializers.clear(); 1149 1150 for (auto ID : LazyInits) 1151 Initializers.push_back(Source->GetExternalDecl(ID)); 1152 1153 assert(LazyInitializers.empty() && 1154 "GetExternalDecl for lazy module initializer added more inits"); 1155 } 1156 1157 void ASTContext::addModuleInitializer(Module *M, Decl *D) { 1158 // One special case: if we add a module initializer that imports another 1159 // module, and that module's only initializer is an ImportDecl, simplify. 1160 if (const auto *ID = dyn_cast<ImportDecl>(D)) { 1161 auto It = ModuleInitializers.find(ID->getImportedModule()); 1162 1163 // Maybe the ImportDecl does nothing at all. (Common case.) 1164 if (It == ModuleInitializers.end()) 1165 return; 1166 1167 // Maybe the ImportDecl only imports another ImportDecl. 
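    // (For instance, if module A's only initializer is itself an "import B;"
    //  declaration, recording an import of A can be collapsed into recording
    //  that import of B directly.)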
1168 auto &Imported = *It->second; 1169 if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) { 1170 Imported.resolve(*this); 1171 auto *OnlyDecl = Imported.Initializers.front(); 1172 if (isa<ImportDecl>(OnlyDecl)) 1173 D = OnlyDecl; 1174 } 1175 } 1176 1177 auto *&Inits = ModuleInitializers[M]; 1178 if (!Inits) 1179 Inits = new (*this) PerModuleInitializers; 1180 Inits->Initializers.push_back(D); 1181 } 1182 1183 void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) { 1184 auto *&Inits = ModuleInitializers[M]; 1185 if (!Inits) 1186 Inits = new (*this) PerModuleInitializers; 1187 Inits->LazyInitializers.insert(Inits->LazyInitializers.end(), 1188 IDs.begin(), IDs.end()); 1189 } 1190 1191 ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) { 1192 auto It = ModuleInitializers.find(M); 1193 if (It == ModuleInitializers.end()) 1194 return None; 1195 1196 auto *Inits = It->second; 1197 Inits->resolve(*this); 1198 return Inits->Initializers; 1199 } 1200 1201 ExternCContextDecl *ASTContext::getExternCContextDecl() const { 1202 if (!ExternCContext) 1203 ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); 1204 1205 return ExternCContext; 1206 } 1207 1208 BuiltinTemplateDecl * 1209 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, 1210 const IdentifierInfo *II) const { 1211 auto *BuiltinTemplate = 1212 BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK); 1213 BuiltinTemplate->setImplicit(); 1214 getTranslationUnitDecl()->addDecl(BuiltinTemplate); 1215 1216 return BuiltinTemplate; 1217 } 1218 1219 BuiltinTemplateDecl * 1220 ASTContext::getMakeIntegerSeqDecl() const { 1221 if (!MakeIntegerSeqDecl) 1222 MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq, 1223 getMakeIntegerSeqName()); 1224 return MakeIntegerSeqDecl; 1225 } 1226 1227 BuiltinTemplateDecl * 1228 ASTContext::getTypePackElementDecl() const { 1229 if (!TypePackElementDecl) 1230 TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element, 1231 getTypePackElementName()); 1232 return TypePackElementDecl; 1233 } 1234 1235 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, 1236 RecordDecl::TagKind TK) const { 1237 SourceLocation Loc; 1238 RecordDecl *NewDecl; 1239 if (getLangOpts().CPlusPlus) 1240 NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, 1241 Loc, &Idents.get(Name)); 1242 else 1243 NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, 1244 &Idents.get(Name)); 1245 NewDecl->setImplicit(); 1246 NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( 1247 const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default)); 1248 return NewDecl; 1249 } 1250 1251 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T, 1252 StringRef Name) const { 1253 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 1254 TypedefDecl *NewDecl = TypedefDecl::Create( 1255 const_cast<ASTContext &>(*this), getTranslationUnitDecl(), 1256 SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo); 1257 NewDecl->setImplicit(); 1258 return NewDecl; 1259 } 1260 1261 TypedefDecl *ASTContext::getInt128Decl() const { 1262 if (!Int128Decl) 1263 Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t"); 1264 return Int128Decl; 1265 } 1266 1267 TypedefDecl *ASTContext::getUInt128Decl() const { 1268 if (!UInt128Decl) 1269 UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t"); 1270 return UInt128Decl; 1271 } 1272 1273 void ASTContext::InitBuiltinType(CanQualType &R, 
BuiltinType::Kind K) { 1274 auto *Ty = new (*this, TypeAlignment) BuiltinType(K); 1275 R = CanQualType::CreateUnsafe(QualType(Ty, 0)); 1276 Types.push_back(Ty); 1277 } 1278 1279 void ASTContext::InitBuiltinTypes(const TargetInfo &Target, 1280 const TargetInfo *AuxTarget) { 1281 assert((!this->Target || this->Target == &Target) && 1282 "Incorrect target reinitialization"); 1283 assert(VoidTy.isNull() && "Context reinitialized?"); 1284 1285 this->Target = &Target; 1286 this->AuxTarget = AuxTarget; 1287 1288 ABI.reset(createCXXABI(Target)); 1289 AddrSpaceMap = getAddressSpaceMap(Target, LangOpts); 1290 AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts); 1291 1292 // C99 6.2.5p19. 1293 InitBuiltinType(VoidTy, BuiltinType::Void); 1294 1295 // C99 6.2.5p2. 1296 InitBuiltinType(BoolTy, BuiltinType::Bool); 1297 // C99 6.2.5p3. 1298 if (LangOpts.CharIsSigned) 1299 InitBuiltinType(CharTy, BuiltinType::Char_S); 1300 else 1301 InitBuiltinType(CharTy, BuiltinType::Char_U); 1302 // C99 6.2.5p4. 1303 InitBuiltinType(SignedCharTy, BuiltinType::SChar); 1304 InitBuiltinType(ShortTy, BuiltinType::Short); 1305 InitBuiltinType(IntTy, BuiltinType::Int); 1306 InitBuiltinType(LongTy, BuiltinType::Long); 1307 InitBuiltinType(LongLongTy, BuiltinType::LongLong); 1308 1309 // C99 6.2.5p6. 1310 InitBuiltinType(UnsignedCharTy, BuiltinType::UChar); 1311 InitBuiltinType(UnsignedShortTy, BuiltinType::UShort); 1312 InitBuiltinType(UnsignedIntTy, BuiltinType::UInt); 1313 InitBuiltinType(UnsignedLongTy, BuiltinType::ULong); 1314 InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong); 1315 1316 // C99 6.2.5p10. 1317 InitBuiltinType(FloatTy, BuiltinType::Float); 1318 InitBuiltinType(DoubleTy, BuiltinType::Double); 1319 InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble); 1320 1321 // GNU extension, __float128 for IEEE quadruple precision 1322 InitBuiltinType(Float128Ty, BuiltinType::Float128); 1323 1324 // __ibm128 for IBM extended precision 1325 InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128); 1326 1327 // C11 extension ISO/IEC TS 18661-3 1328 InitBuiltinType(Float16Ty, BuiltinType::Float16); 1329 1330 // ISO/IEC JTC1 SC22 WG14 N1169 Extension 1331 InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum); 1332 InitBuiltinType(AccumTy, BuiltinType::Accum); 1333 InitBuiltinType(LongAccumTy, BuiltinType::LongAccum); 1334 InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum); 1335 InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum); 1336 InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum); 1337 InitBuiltinType(ShortFractTy, BuiltinType::ShortFract); 1338 InitBuiltinType(FractTy, BuiltinType::Fract); 1339 InitBuiltinType(LongFractTy, BuiltinType::LongFract); 1340 InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract); 1341 InitBuiltinType(UnsignedFractTy, BuiltinType::UFract); 1342 InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract); 1343 InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum); 1344 InitBuiltinType(SatAccumTy, BuiltinType::SatAccum); 1345 InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum); 1346 InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum); 1347 InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum); 1348 InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum); 1349 InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract); 1350 InitBuiltinType(SatFractTy, BuiltinType::SatFract); 1351 InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract); 1352 InitBuiltinType(SatUnsignedShortFractTy, 
BuiltinType::SatUShortFract); 1353 InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract); 1354 InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract); 1355 1356 // GNU extension, 128-bit integers. 1357 InitBuiltinType(Int128Ty, BuiltinType::Int128); 1358 InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128); 1359 1360 // C++ 3.9.1p5 1361 if (TargetInfo::isTypeSigned(Target.getWCharType())) 1362 InitBuiltinType(WCharTy, BuiltinType::WChar_S); 1363 else // -fshort-wchar makes wchar_t be unsigned. 1364 InitBuiltinType(WCharTy, BuiltinType::WChar_U); 1365 if (LangOpts.CPlusPlus && LangOpts.WChar) 1366 WideCharTy = WCharTy; 1367 else { 1368 // C99 (or C++ using -fno-wchar). 1369 WideCharTy = getFromTargetType(Target.getWCharType()); 1370 } 1371 1372 WIntTy = getFromTargetType(Target.getWIntType()); 1373 1374 // C++20 (proposed) 1375 InitBuiltinType(Char8Ty, BuiltinType::Char8); 1376 1377 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1378 InitBuiltinType(Char16Ty, BuiltinType::Char16); 1379 else // C99 1380 Char16Ty = getFromTargetType(Target.getChar16Type()); 1381 1382 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1383 InitBuiltinType(Char32Ty, BuiltinType::Char32); 1384 else // C99 1385 Char32Ty = getFromTargetType(Target.getChar32Type()); 1386 1387 // Placeholder type for type-dependent expressions whose type is 1388 // completely unknown. No code should ever check a type against 1389 // DependentTy and users should never see it; however, it is here to 1390 // help diagnose failures to properly check for type-dependent 1391 // expressions. 1392 InitBuiltinType(DependentTy, BuiltinType::Dependent); 1393 1394 // Placeholder type for functions. 1395 InitBuiltinType(OverloadTy, BuiltinType::Overload); 1396 1397 // Placeholder type for bound members. 1398 InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); 1399 1400 // Placeholder type for pseudo-objects. 1401 InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); 1402 1403 // "any" type; useful for debugger-like clients. 1404 InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny); 1405 1406 // Placeholder type for unbridged ARC casts. 1407 InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast); 1408 1409 // Placeholder type for builtin functions. 1410 InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn); 1411 1412 // Placeholder type for OMP array sections. 1413 if (LangOpts.OpenMP) { 1414 InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); 1415 InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping); 1416 InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator); 1417 } 1418 if (LangOpts.MatrixTypes) 1419 InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx); 1420 1421 // Builtin types for 'id', 'Class', and 'SEL'. 
1422 InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId); 1423 InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass); 1424 InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel); 1425 1426 if (LangOpts.OpenCL) { 1427 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 1428 InitBuiltinType(SingletonId, BuiltinType::Id); 1429 #include "clang/Basic/OpenCLImageTypes.def" 1430 1431 InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler); 1432 InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent); 1433 InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent); 1434 InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue); 1435 InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID); 1436 1437 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 1438 InitBuiltinType(Id##Ty, BuiltinType::Id); 1439 #include "clang/Basic/OpenCLExtensionTypes.def" 1440 } 1441 1442 if (Target.hasAArch64SVETypes()) { 1443 #define SVE_TYPE(Name, Id, SingletonId) \ 1444 InitBuiltinType(SingletonId, BuiltinType::Id); 1445 #include "clang/Basic/AArch64SVEACLETypes.def" 1446 } 1447 1448 if (Target.getTriple().isPPC64()) { 1449 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 1450 InitBuiltinType(Id##Ty, BuiltinType::Id); 1451 #include "clang/Basic/PPCTypes.def" 1452 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 1453 InitBuiltinType(Id##Ty, BuiltinType::Id); 1454 #include "clang/Basic/PPCTypes.def" 1455 } 1456 1457 if (Target.hasRISCVVTypes()) { 1458 #define RVV_TYPE(Name, Id, SingletonId) \ 1459 InitBuiltinType(SingletonId, BuiltinType::Id); 1460 #include "clang/Basic/RISCVVTypes.def" 1461 } 1462 1463 // Builtin type for __objc_yes and __objc_no 1464 ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? 1465 SignedCharTy : BoolTy); 1466 1467 ObjCConstantStringType = QualType(); 1468 1469 ObjCSuperType = QualType(); 1470 1471 // void * type 1472 if (LangOpts.OpenCLGenericAddressSpace) { 1473 auto Q = VoidTy.getQualifiers(); 1474 Q.setAddressSpace(LangAS::opencl_generic); 1475 VoidPtrTy = getPointerType(getCanonicalType( 1476 getQualifiedType(VoidTy.getUnqualifiedType(), Q))); 1477 } else { 1478 VoidPtrTy = getPointerType(VoidTy); 1479 } 1480 1481 // nullptr type (C++0x 2.14.7) 1482 InitBuiltinType(NullPtrTy, BuiltinType::NullPtr); 1483 1484 // half type (OpenCL 6.1.1.1) / ARM NEON __fp16 1485 InitBuiltinType(HalfTy, BuiltinType::Half); 1486 1487 InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16); 1488 1489 // Builtin type used to help define __builtin_va_list. 1490 VaListTagDecl = nullptr; 1491 1492 // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls. 1493 if (LangOpts.MicrosoftExt || LangOpts.Borland) { 1494 MSGuidTagDecl = buildImplicitRecord("_GUID"); 1495 getTranslationUnitDecl()->addDecl(MSGuidTagDecl); 1496 } 1497 } 1498 1499 DiagnosticsEngine &ASTContext::getDiagnostics() const { 1500 return SourceMgr.getDiagnostics(); 1501 } 1502 1503 AttrVec& ASTContext::getDeclAttrs(const Decl *D) { 1504 AttrVec *&Result = DeclAttrs[D]; 1505 if (!Result) { 1506 void *Mem = Allocate(sizeof(AttrVec)); 1507 Result = new (Mem) AttrVec; 1508 } 1509 1510 return *Result; 1511 } 1512 1513 /// Erase the attributes corresponding to the given declaration. 1514 void ASTContext::eraseDeclAttrs(const Decl *D) { 1515 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D); 1516 if (Pos != DeclAttrs.end()) { 1517 Pos->second->~AttrVec(); 1518 DeclAttrs.erase(Pos); 1519 } 1520 } 1521 1522 // FIXME: Remove ? 
1523 MemberSpecializationInfo * 1524 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) { 1525 assert(Var->isStaticDataMember() && "Not a static data member"); 1526 return getTemplateOrSpecializationInfo(Var) 1527 .dyn_cast<MemberSpecializationInfo *>(); 1528 } 1529 1530 ASTContext::TemplateOrSpecializationInfo 1531 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) { 1532 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos = 1533 TemplateOrInstantiation.find(Var); 1534 if (Pos == TemplateOrInstantiation.end()) 1535 return {}; 1536 1537 return Pos->second; 1538 } 1539 1540 void 1541 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, 1542 TemplateSpecializationKind TSK, 1543 SourceLocation PointOfInstantiation) { 1544 assert(Inst->isStaticDataMember() && "Not a static data member"); 1545 assert(Tmpl->isStaticDataMember() && "Not a static data member"); 1546 setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo( 1547 Tmpl, TSK, PointOfInstantiation)); 1548 } 1549 1550 void 1551 ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, 1552 TemplateOrSpecializationInfo TSI) { 1553 assert(!TemplateOrInstantiation[Inst] && 1554 "Already noted what the variable was instantiated from"); 1555 TemplateOrInstantiation[Inst] = TSI; 1556 } 1557 1558 NamedDecl * 1559 ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) { 1560 auto Pos = InstantiatedFromUsingDecl.find(UUD); 1561 if (Pos == InstantiatedFromUsingDecl.end()) 1562 return nullptr; 1563 1564 return Pos->second; 1565 } 1566 1567 void 1568 ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { 1569 assert((isa<UsingDecl>(Pattern) || 1570 isa<UnresolvedUsingValueDecl>(Pattern) || 1571 isa<UnresolvedUsingTypenameDecl>(Pattern)) && 1572 "pattern decl is not a using decl"); 1573 assert((isa<UsingDecl>(Inst) || 1574 isa<UnresolvedUsingValueDecl>(Inst) || 1575 isa<UnresolvedUsingTypenameDecl>(Inst)) && 1576 "instantiation did not produce a using decl"); 1577 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); 1578 InstantiatedFromUsingDecl[Inst] = Pattern; 1579 } 1580 1581 UsingEnumDecl * 1582 ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) { 1583 auto Pos = InstantiatedFromUsingEnumDecl.find(UUD); 1584 if (Pos == InstantiatedFromUsingEnumDecl.end()) 1585 return nullptr; 1586 1587 return Pos->second; 1588 } 1589 1590 void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst, 1591 UsingEnumDecl *Pattern) { 1592 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists"); 1593 InstantiatedFromUsingEnumDecl[Inst] = Pattern; 1594 } 1595 1596 UsingShadowDecl * 1597 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { 1598 llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos 1599 = InstantiatedFromUsingShadowDecl.find(Inst); 1600 if (Pos == InstantiatedFromUsingShadowDecl.end()) 1601 return nullptr; 1602 1603 return Pos->second; 1604 } 1605 1606 void 1607 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, 1608 UsingShadowDecl *Pattern) { 1609 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); 1610 InstantiatedFromUsingShadowDecl[Inst] = Pattern; 1611 } 1612 1613 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { 1614 llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos 1615 = InstantiatedFromUnnamedFieldDecl.find(Field); 1616 if (Pos == 
InstantiatedFromUnnamedFieldDecl.end()) 1617 return nullptr; 1618 1619 return Pos->second; 1620 } 1621 1622 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1623 FieldDecl *Tmpl) { 1624 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1625 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1626 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1627 "Already noted what unnamed field was instantiated from"); 1628 1629 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1630 } 1631 1632 ASTContext::overridden_cxx_method_iterator 1633 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1634 return overridden_methods(Method).begin(); 1635 } 1636 1637 ASTContext::overridden_cxx_method_iterator 1638 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1639 return overridden_methods(Method).end(); 1640 } 1641 1642 unsigned 1643 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1644 auto Range = overridden_methods(Method); 1645 return Range.end() - Range.begin(); 1646 } 1647 1648 ASTContext::overridden_method_range 1649 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1650 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1651 OverriddenMethods.find(Method->getCanonicalDecl()); 1652 if (Pos == OverriddenMethods.end()) 1653 return overridden_method_range(nullptr, nullptr); 1654 return overridden_method_range(Pos->second.begin(), Pos->second.end()); 1655 } 1656 1657 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1658 const CXXMethodDecl *Overridden) { 1659 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1660 OverriddenMethods[Method].push_back(Overridden); 1661 } 1662 1663 void ASTContext::getOverriddenMethods( 1664 const NamedDecl *D, 1665 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1666 assert(D); 1667 1668 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1669 Overridden.append(overridden_methods_begin(CXXMethod), 1670 overridden_methods_end(CXXMethod)); 1671 return; 1672 } 1673 1674 const auto *Method = dyn_cast<ObjCMethodDecl>(D); 1675 if (!Method) 1676 return; 1677 1678 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1679 Method->getOverriddenMethods(OverDecls); 1680 Overridden.append(OverDecls.begin(), OverDecls.end()); 1681 } 1682 1683 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1684 assert(!Import->getNextLocalImport() && 1685 "Import declaration already in the chain"); 1686 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1687 if (!FirstLocalImport) { 1688 FirstLocalImport = Import; 1689 LastLocalImport = Import; 1690 return; 1691 } 1692 1693 LastLocalImport->setNextLocalImport(Import); 1694 LastLocalImport = Import; 1695 } 1696 1697 //===----------------------------------------------------------------------===// 1698 // Type Sizing and Analysis 1699 //===----------------------------------------------------------------------===// 1700 1701 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1702 /// scalar floating point type. 
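/// For example (illustrative): for 'float' this is typically the IEEE
/// single-precision semantics and for 'double' the IEEE double-precision
/// semantics, exactly as reported by the TargetInfo format accessors below.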
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  switch (T->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a floating point type!");
  case BuiltinType::BFloat16:
    return Target->getBFloat16Format();
  case BuiltinType::Float16:
    return Target->getHalfFormat();
  case BuiltinType::Half:
    // For HLSL, when the native half type is disabled, half will be treated
    // as float.
    if (getLangOpts().HLSL) {
      if (getLangOpts().NativeHalfType)
        return Target->getHalfFormat();
      else
        return Target->getFloatFormat();
    } else {
      return Target->getHalfFormat();
    }
  case BuiltinType::Float:   return Target->getFloatFormat();
  case BuiltinType::Double:  return Target->getDoubleFormat();
  case BuiltinType::Ibm128:
    return Target->getIbm128Format();
  case BuiltinType::LongDouble:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
      return AuxTarget->getLongDoubleFormat();
    return Target->getLongDoubleFormat();
  case BuiltinType::Float128:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
      return AuxTarget->getFloat128Format();
    return Target->getFloat128Format();
  }
}

CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;

    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  } else if (isa<FieldDecl>(D))
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();

  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
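      // (Illustrative: a target might report getLargeArrayMinWidth() == 128
      // and getLargeArrayAlign() == 128 (both in bits), so sufficiently large
      // constant or variable-length arrays get at least 16-byte alignment.)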
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
      if (const auto *VD = dyn_cast<VarDecl>(D)) {
        if (VD->hasGlobalStorage() && !ForAlignof) {
          uint64_t TypeSize = getTypeSize(T.getTypePtr());
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
        }
      }
    }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have a hard limit on the maximum alignment that can be
  // requested with the aligned attribute on static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}

CharUnits ASTContext::getExnObjectAlignment() const {
  return toCharUnitsFromBits(Target->getExnObjectAlignment());
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  TypeInfoChars Info = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
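  // (Illustrative: when a derived class places its own members in a base
  // subobject's tail padding, copying the base with its full sizeof would
  // clobber them, so the data size is what a trivial assignment must copy.)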
1853 if (getLangOpts().CPlusPlus) { 1854 if (const auto *RT = T->getAs<RecordType>()) { 1855 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1856 Info.Width = layout.getDataSize(); 1857 } 1858 } 1859 1860 return Info; 1861 } 1862 1863 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1864 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1865 TypeInfoChars 1866 static getConstantArrayInfoInChars(const ASTContext &Context, 1867 const ConstantArrayType *CAT) { 1868 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1869 uint64_t Size = CAT->getSize().getZExtValue(); 1870 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1871 (uint64_t)(-1)/Size) && 1872 "Overflow in array type char size evaluation"); 1873 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1874 unsigned Align = EltInfo.Align.getQuantity(); 1875 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1876 Context.getTargetInfo().getPointerWidth(0) == 64) 1877 Width = llvm::alignTo(Width, Align); 1878 return TypeInfoChars(CharUnits::fromQuantity(Width), 1879 CharUnits::fromQuantity(Align), 1880 EltInfo.AlignRequirement); 1881 } 1882 1883 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1884 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1885 return getConstantArrayInfoInChars(*this, CAT); 1886 TypeInfo Info = getTypeInfo(T); 1887 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1888 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1889 } 1890 1891 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1892 return getTypeInfoInChars(T.getTypePtr()); 1893 } 1894 1895 bool ASTContext::isAlignmentRequired(const Type *T) const { 1896 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1897 } 1898 1899 bool ASTContext::isAlignmentRequired(QualType T) const { 1900 return isAlignmentRequired(T.getTypePtr()); 1901 } 1902 1903 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1904 bool NeedsPreferredAlignment) const { 1905 // An alignment on a typedef overrides anything else. 1906 if (const auto *TT = T->getAs<TypedefType>()) 1907 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1908 return Align; 1909 1910 // If we have an (array of) complete type, we're done. 1911 T = getBaseElementType(T); 1912 if (!T->isIncompleteType()) 1913 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1914 1915 // If we had an array type, its element type might be a typedef 1916 // type with an alignment attribute. 1917 if (const auto *TT = T->getAs<TypedefType>()) 1918 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1919 return Align; 1920 1921 // Otherwise, see if the declaration of the type had an attribute. 1922 if (const auto *TT = T->getAs<TagType>()) 1923 return TT->getDecl()->getMaxAlignment(); 1924 1925 return 0; 1926 } 1927 1928 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1929 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1930 if (I != MemoizedTypeInfo.end()) 1931 return I->second; 1932 1933 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1934 TypeInfo TI = getTypeInfoImpl(T); 1935 MemoizedTypeInfo[T] = TI; 1936 return TI; 1937 } 1938 1939 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1940 /// method does not work on incomplete types. 
1941 /// 1942 /// FIXME: Pointers into different addr spaces could have different sizes and 1943 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1944 /// should take a QualType, &c. 1945 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1946 uint64_t Width = 0; 1947 unsigned Align = 8; 1948 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1949 unsigned AS = 0; 1950 switch (T->getTypeClass()) { 1951 #define TYPE(Class, Base) 1952 #define ABSTRACT_TYPE(Class, Base) 1953 #define NON_CANONICAL_TYPE(Class, Base) 1954 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1955 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1956 case Type::Class: \ 1957 assert(!T->isDependentType() && "should not see dependent types here"); \ 1958 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1959 #include "clang/AST/TypeNodes.inc" 1960 llvm_unreachable("Should not see dependent types"); 1961 1962 case Type::FunctionNoProto: 1963 case Type::FunctionProto: 1964 // GCC extension: alignof(function) = 32 bits 1965 Width = 0; 1966 Align = 32; 1967 break; 1968 1969 case Type::IncompleteArray: 1970 case Type::VariableArray: 1971 case Type::ConstantArray: { 1972 // Model non-constant sized arrays as size zero, but track the alignment. 1973 uint64_t Size = 0; 1974 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1975 Size = CAT->getSize().getZExtValue(); 1976 1977 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1978 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1979 "Overflow in array type bit size evaluation"); 1980 Width = EltInfo.Width * Size; 1981 Align = EltInfo.Align; 1982 AlignRequirement = EltInfo.AlignRequirement; 1983 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1984 getTargetInfo().getPointerWidth(0) == 64) 1985 Width = llvm::alignTo(Width, Align); 1986 break; 1987 } 1988 1989 case Type::ExtVector: 1990 case Type::Vector: { 1991 const auto *VT = cast<VectorType>(T); 1992 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1993 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 1994 : EltInfo.Width * VT->getNumElements(); 1995 // Enforce at least byte alignment. 1996 Align = std::max<unsigned>(8, Width); 1997 1998 // If the alignment is not a power of 2, round up to the next power of 2. 1999 // This happens for non-power-of-2 length vectors. 2000 if (Align & (Align-1)) { 2001 Align = llvm::NextPowerOf2(Align); 2002 Width = llvm::alignTo(Width, Align); 2003 } 2004 // Adjust the alignment based on the target max. 2005 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 2006 if (TargetVectorAlign && TargetVectorAlign < Align) 2007 Align = TargetVectorAlign; 2008 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 2009 // Adjust the alignment for fixed-length SVE vectors. This is important 2010 // for non-power-of-2 vector lengths. 2011 Align = 128; 2012 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 2013 // Adjust the alignment for fixed-length SVE predicates. 2014 Align = 16; 2015 break; 2016 } 2017 2018 case Type::ConstantMatrix: { 2019 const auto *MT = cast<ConstantMatrixType>(T); 2020 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 2021 // The internal layout of a matrix value is implementation defined. 2022 // Initially be ABI compatible with arrays with respect to alignment and 2023 // size. 
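    // (Illustrative: a 'float __attribute__((matrix_type(4, 4)))' value is
    // laid out like 'float[16]': Width = 32 * 4 * 4 = 512 bits, Align = 32.)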
2024 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 2025 Align = ElementInfo.Align; 2026 break; 2027 } 2028 2029 case Type::Builtin: 2030 switch (cast<BuiltinType>(T)->getKind()) { 2031 default: llvm_unreachable("Unknown builtin type!"); 2032 case BuiltinType::Void: 2033 // GCC extension: alignof(void) = 8 bits. 2034 Width = 0; 2035 Align = 8; 2036 break; 2037 case BuiltinType::Bool: 2038 Width = Target->getBoolWidth(); 2039 Align = Target->getBoolAlign(); 2040 break; 2041 case BuiltinType::Char_S: 2042 case BuiltinType::Char_U: 2043 case BuiltinType::UChar: 2044 case BuiltinType::SChar: 2045 case BuiltinType::Char8: 2046 Width = Target->getCharWidth(); 2047 Align = Target->getCharAlign(); 2048 break; 2049 case BuiltinType::WChar_S: 2050 case BuiltinType::WChar_U: 2051 Width = Target->getWCharWidth(); 2052 Align = Target->getWCharAlign(); 2053 break; 2054 case BuiltinType::Char16: 2055 Width = Target->getChar16Width(); 2056 Align = Target->getChar16Align(); 2057 break; 2058 case BuiltinType::Char32: 2059 Width = Target->getChar32Width(); 2060 Align = Target->getChar32Align(); 2061 break; 2062 case BuiltinType::UShort: 2063 case BuiltinType::Short: 2064 Width = Target->getShortWidth(); 2065 Align = Target->getShortAlign(); 2066 break; 2067 case BuiltinType::UInt: 2068 case BuiltinType::Int: 2069 Width = Target->getIntWidth(); 2070 Align = Target->getIntAlign(); 2071 break; 2072 case BuiltinType::ULong: 2073 case BuiltinType::Long: 2074 Width = Target->getLongWidth(); 2075 Align = Target->getLongAlign(); 2076 break; 2077 case BuiltinType::ULongLong: 2078 case BuiltinType::LongLong: 2079 Width = Target->getLongLongWidth(); 2080 Align = Target->getLongLongAlign(); 2081 break; 2082 case BuiltinType::Int128: 2083 case BuiltinType::UInt128: 2084 Width = 128; 2085 Align = 128; // int128_t is 128-bit aligned on all targets. 
2086 break; 2087 case BuiltinType::ShortAccum: 2088 case BuiltinType::UShortAccum: 2089 case BuiltinType::SatShortAccum: 2090 case BuiltinType::SatUShortAccum: 2091 Width = Target->getShortAccumWidth(); 2092 Align = Target->getShortAccumAlign(); 2093 break; 2094 case BuiltinType::Accum: 2095 case BuiltinType::UAccum: 2096 case BuiltinType::SatAccum: 2097 case BuiltinType::SatUAccum: 2098 Width = Target->getAccumWidth(); 2099 Align = Target->getAccumAlign(); 2100 break; 2101 case BuiltinType::LongAccum: 2102 case BuiltinType::ULongAccum: 2103 case BuiltinType::SatLongAccum: 2104 case BuiltinType::SatULongAccum: 2105 Width = Target->getLongAccumWidth(); 2106 Align = Target->getLongAccumAlign(); 2107 break; 2108 case BuiltinType::ShortFract: 2109 case BuiltinType::UShortFract: 2110 case BuiltinType::SatShortFract: 2111 case BuiltinType::SatUShortFract: 2112 Width = Target->getShortFractWidth(); 2113 Align = Target->getShortFractAlign(); 2114 break; 2115 case BuiltinType::Fract: 2116 case BuiltinType::UFract: 2117 case BuiltinType::SatFract: 2118 case BuiltinType::SatUFract: 2119 Width = Target->getFractWidth(); 2120 Align = Target->getFractAlign(); 2121 break; 2122 case BuiltinType::LongFract: 2123 case BuiltinType::ULongFract: 2124 case BuiltinType::SatLongFract: 2125 case BuiltinType::SatULongFract: 2126 Width = Target->getLongFractWidth(); 2127 Align = Target->getLongFractAlign(); 2128 break; 2129 case BuiltinType::BFloat16: 2130 if (Target->hasBFloat16Type()) { 2131 Width = Target->getBFloat16Width(); 2132 Align = Target->getBFloat16Align(); 2133 } 2134 break; 2135 case BuiltinType::Float16: 2136 case BuiltinType::Half: 2137 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2138 !getLangOpts().OpenMPIsDevice) { 2139 Width = Target->getHalfWidth(); 2140 Align = Target->getHalfAlign(); 2141 } else { 2142 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2143 "Expected OpenMP device compilation."); 2144 Width = AuxTarget->getHalfWidth(); 2145 Align = AuxTarget->getHalfAlign(); 2146 } 2147 break; 2148 case BuiltinType::Float: 2149 Width = Target->getFloatWidth(); 2150 Align = Target->getFloatAlign(); 2151 break; 2152 case BuiltinType::Double: 2153 Width = Target->getDoubleWidth(); 2154 Align = Target->getDoubleAlign(); 2155 break; 2156 case BuiltinType::Ibm128: 2157 Width = Target->getIbm128Width(); 2158 Align = Target->getIbm128Align(); 2159 break; 2160 case BuiltinType::LongDouble: 2161 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2162 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2163 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2164 Width = AuxTarget->getLongDoubleWidth(); 2165 Align = AuxTarget->getLongDoubleAlign(); 2166 } else { 2167 Width = Target->getLongDoubleWidth(); 2168 Align = Target->getLongDoubleAlign(); 2169 } 2170 break; 2171 case BuiltinType::Float128: 2172 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2173 !getLangOpts().OpenMPIsDevice) { 2174 Width = Target->getFloat128Width(); 2175 Align = Target->getFloat128Align(); 2176 } else { 2177 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2178 "Expected OpenMP device compilation."); 2179 Width = AuxTarget->getFloat128Width(); 2180 Align = AuxTarget->getFloat128Align(); 2181 } 2182 break; 2183 case BuiltinType::NullPtr: 2184 Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) 2185 Align = Target->getPointerAlign(0); // == sizeof(void*) 2186 break; 2187 case BuiltinType::ObjCId: 2188 case 
BuiltinType::ObjCClass: 2189 case BuiltinType::ObjCSel: 2190 Width = Target->getPointerWidth(0); 2191 Align = Target->getPointerAlign(0); 2192 break; 2193 case BuiltinType::OCLSampler: 2194 case BuiltinType::OCLEvent: 2195 case BuiltinType::OCLClkEvent: 2196 case BuiltinType::OCLQueue: 2197 case BuiltinType::OCLReserveID: 2198 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2199 case BuiltinType::Id: 2200 #include "clang/Basic/OpenCLImageTypes.def" 2201 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2202 case BuiltinType::Id: 2203 #include "clang/Basic/OpenCLExtensionTypes.def" 2204 AS = getTargetAddressSpace( 2205 Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); 2206 Width = Target->getPointerWidth(AS); 2207 Align = Target->getPointerAlign(AS); 2208 break; 2209 // The SVE types are effectively target-specific. The length of an 2210 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2211 // of 128 bits. There is one predicate bit for each vector byte, so the 2212 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2213 // 2214 // Because the length is only known at runtime, we use a dummy value 2215 // of 0 for the static length. The alignment values are those defined 2216 // by the Procedure Call Standard for the Arm Architecture. 2217 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2218 IsSigned, IsFP, IsBF) \ 2219 case BuiltinType::Id: \ 2220 Width = 0; \ 2221 Align = 128; \ 2222 break; 2223 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2224 case BuiltinType::Id: \ 2225 Width = 0; \ 2226 Align = 16; \ 2227 break; 2228 #include "clang/Basic/AArch64SVEACLETypes.def" 2229 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2230 case BuiltinType::Id: \ 2231 Width = Size; \ 2232 Align = Size; \ 2233 break; 2234 #include "clang/Basic/PPCTypes.def" 2235 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2236 IsFP) \ 2237 case BuiltinType::Id: \ 2238 Width = 0; \ 2239 Align = ElBits; \ 2240 break; 2241 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2242 case BuiltinType::Id: \ 2243 Width = 0; \ 2244 Align = 8; \ 2245 break; 2246 #include "clang/Basic/RISCVVTypes.def" 2247 } 2248 break; 2249 case Type::ObjCObjectPointer: 2250 Width = Target->getPointerWidth(0); 2251 Align = Target->getPointerAlign(0); 2252 break; 2253 case Type::BlockPointer: 2254 AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType()); 2255 Width = Target->getPointerWidth(AS); 2256 Align = Target->getPointerAlign(AS); 2257 break; 2258 case Type::LValueReference: 2259 case Type::RValueReference: 2260 // alignof and sizeof should never enter this code path here, so we go 2261 // the pointer route. 2262 AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType()); 2263 Width = Target->getPointerWidth(AS); 2264 Align = Target->getPointerAlign(AS); 2265 break; 2266 case Type::Pointer: 2267 AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); 2268 Width = Target->getPointerWidth(AS); 2269 Align = Target->getPointerAlign(AS); 2270 break; 2271 case Type::MemberPointer: { 2272 const auto *MPT = cast<MemberPointerType>(T); 2273 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2274 Width = MPI.Width; 2275 Align = MPI.Align; 2276 break; 2277 } 2278 case Type::Complex: { 2279 // Complex types have the same alignment as their elements, but twice the 2280 // size. 
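    // (Illustrative: '_Complex double' is typically 128 bits wide but keeps
    // double's 64-bit alignment on common targets.)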
2281 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2282 Width = EltInfo.Width * 2; 2283 Align = EltInfo.Align; 2284 break; 2285 } 2286 case Type::ObjCObject: 2287 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2288 case Type::Adjusted: 2289 case Type::Decayed: 2290 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2291 case Type::ObjCInterface: { 2292 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2293 if (ObjCI->getDecl()->isInvalidDecl()) { 2294 Width = 8; 2295 Align = 8; 2296 break; 2297 } 2298 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2299 Width = toBits(Layout.getSize()); 2300 Align = toBits(Layout.getAlignment()); 2301 break; 2302 } 2303 case Type::BitInt: { 2304 const auto *EIT = cast<BitIntType>(T); 2305 Align = 2306 std::min(static_cast<unsigned>(std::max( 2307 getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), 2308 Target->getLongLongAlign()); 2309 Width = llvm::alignTo(EIT->getNumBits(), Align); 2310 break; 2311 } 2312 case Type::Record: 2313 case Type::Enum: { 2314 const auto *TT = cast<TagType>(T); 2315 2316 if (TT->getDecl()->isInvalidDecl()) { 2317 Width = 8; 2318 Align = 8; 2319 break; 2320 } 2321 2322 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2323 const EnumDecl *ED = ET->getDecl(); 2324 TypeInfo Info = 2325 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2326 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2327 Info.Align = AttrAlign; 2328 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2329 } 2330 return Info; 2331 } 2332 2333 const auto *RT = cast<RecordType>(TT); 2334 const RecordDecl *RD = RT->getDecl(); 2335 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2336 Width = toBits(Layout.getSize()); 2337 Align = toBits(Layout.getAlignment()); 2338 AlignRequirement = RD->hasAttr<AlignedAttr>() 2339 ? AlignRequirementKind::RequiredByRecord 2340 : AlignRequirementKind::None; 2341 break; 2342 } 2343 2344 case Type::SubstTemplateTypeParm: 2345 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2346 getReplacementType().getTypePtr()); 2347 2348 case Type::Auto: 2349 case Type::DeducedTemplateSpecialization: { 2350 const auto *A = cast<DeducedType>(T); 2351 assert(!A->getDeducedType().isNull() && 2352 "cannot request the size of an undeduced or dependent auto type"); 2353 return getTypeInfo(A->getDeducedType().getTypePtr()); 2354 } 2355 2356 case Type::Paren: 2357 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2358 2359 case Type::MacroQualified: 2360 return getTypeInfo( 2361 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2362 2363 case Type::ObjCTypeParam: 2364 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2365 2366 case Type::Using: 2367 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2368 2369 case Type::Typedef: { 2370 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); 2371 TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); 2372 // If the typedef has an aligned attribute on it, it overrides any computed 2373 // alignment we have. This violates the GCC documentation (which says that 2374 // attribute(aligned) can only round up) but matches its implementation. 
    if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());

  case Type::Attributed:
    return getTypeInfo(
        cast<AttributedType>(T)->getEquivalentType().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      if (!llvm::isPowerOf2_64(Width))
        Width = llvm::NextPowerOf2(Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::Pipe:
    Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
    Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}

unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}

unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}

/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
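/// For example (illustrative): on a typical 64-bit target this returns
/// CharUnits::fromQuantity(4) for IntTy and CharUnits::fromQuantity(8) for
/// VoidPtrTy, assuming a 32-bit 'int' and 64-bit pointers.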
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).Width;
}
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).Width;
}

/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}
CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}

/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target, in bits. This can differ from the ABI
/// alignment when overaligning a data type is beneficial for performance or
/// preserves backwards compatibility. (Note: despite the name, the preferred
/// alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment; we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(getASTRecordLayout(RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
2547 if (!TI.isAlignRequired()) 2548 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2549 2550 return ABIAlign; 2551 } 2552 2553 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2554 /// for __attribute__((aligned)) on this target, to be used if no alignment 2555 /// value is specified. 2556 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2557 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2558 } 2559 2560 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2561 /// to a global variable of the specified type. 2562 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2563 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2564 return std::max(getPreferredTypeAlign(T), 2565 getTargetInfo().getMinGlobalAlign(TypeSize)); 2566 } 2567 2568 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2569 /// should be given to a global variable of the specified type. 2570 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2571 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2572 } 2573 2574 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2575 CharUnits Offset = CharUnits::Zero(); 2576 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2577 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2578 Offset += Layout->getBaseClassOffset(Base); 2579 Layout = &getASTRecordLayout(Base); 2580 } 2581 return Offset; 2582 } 2583 2584 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2585 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2586 CharUnits ThisAdjustment = CharUnits::Zero(); 2587 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2588 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2589 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2590 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2591 const CXXRecordDecl *Base = RD; 2592 const CXXRecordDecl *Derived = Path[I]; 2593 if (DerivedMember) 2594 std::swap(Base, Derived); 2595 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2596 RD = Path[I]; 2597 } 2598 if (DerivedMember) 2599 ThisAdjustment = -ThisAdjustment; 2600 return ThisAdjustment; 2601 } 2602 2603 /// DeepCollectObjCIvars - 2604 /// This routine first collects all declared, but not synthesized, ivars in 2605 /// super class and then collects all ivars, including those synthesized for 2606 /// current class. This routine is used for implementation of current class 2607 /// when all ivars, declared and synthesized are known. 2608 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2609 bool leafClass, 2610 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2611 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2612 DeepCollectObjCIvars(SuperClass, false, Ivars); 2613 if (!leafClass) { 2614 llvm::append_range(Ivars, OI->ivars()); 2615 } else { 2616 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2617 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2618 Iv= Iv->getNextIvar()) 2619 Ivars.push_back(Iv); 2620 } 2621 } 2622 2623 /// CollectInheritedProtocols - Collect all protocols in current class and 2624 /// those inherited by it. 
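/// For example (illustrative): given '@protocol P @end' and
/// '@interface I <P> @end', calling this on I's declaration inserts P (and
/// any protocols P itself adopts) into the Protocols set.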
2625 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2626 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2627 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2628 // We can use protocol_iterator here instead of 2629 // all_referenced_protocol_iterator since we are walking all categories. 2630 for (auto *Proto : OI->all_referenced_protocols()) { 2631 CollectInheritedProtocols(Proto, Protocols); 2632 } 2633 2634 // Categories of this Interface. 2635 for (const auto *Cat : OI->visible_categories()) 2636 CollectInheritedProtocols(Cat, Protocols); 2637 2638 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2639 while (SD) { 2640 CollectInheritedProtocols(SD, Protocols); 2641 SD = SD->getSuperClass(); 2642 } 2643 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2644 for (auto *Proto : OC->protocols()) { 2645 CollectInheritedProtocols(Proto, Protocols); 2646 } 2647 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2648 // Insert the protocol. 2649 if (!Protocols.insert( 2650 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2651 return; 2652 2653 for (auto *Proto : OP->protocols()) 2654 CollectInheritedProtocols(Proto, Protocols); 2655 } 2656 } 2657 2658 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2659 const RecordDecl *RD) { 2660 assert(RD->isUnion() && "Must be union type"); 2661 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2662 2663 for (const auto *Field : RD->fields()) { 2664 if (!Context.hasUniqueObjectRepresentations(Field->getType())) 2665 return false; 2666 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2667 if (FieldSize != UnionSize) 2668 return false; 2669 } 2670 return !RD->field_empty(); 2671 } 2672 2673 static int64_t getSubobjectOffset(const FieldDecl *Field, 2674 const ASTContext &Context, 2675 const clang::ASTRecordLayout & /*Layout*/) { 2676 return Context.getFieldOffset(Field); 2677 } 2678 2679 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2680 const ASTContext &Context, 2681 const clang::ASTRecordLayout &Layout) { 2682 return Context.toBits(Layout.getBaseClassOffset(RD)); 2683 } 2684 2685 static llvm::Optional<int64_t> 2686 structHasUniqueObjectRepresentations(const ASTContext &Context, 2687 const RecordDecl *RD); 2688 2689 static llvm::Optional<int64_t> 2690 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) { 2691 if (Field->getType()->isRecordType()) { 2692 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2693 if (!RD->isUnion()) 2694 return structHasUniqueObjectRepresentations(Context, RD); 2695 } 2696 2697 // A _BitInt type may not be unique if it has padding bits 2698 // but if it is a bitfield the padding bits are not used. 
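  // (Illustrative: if a target stores _BitInt(17) in a 32-bit unit, a plain
  // _BitInt(17) member carries 15 padding bits, but a 17-bit bit-field of
  // that type does not use them.)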
2699 bool IsBitIntType = Field->getType()->isBitIntType(); 2700 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2701 !Context.hasUniqueObjectRepresentations(Field->getType())) 2702 return llvm::None; 2703 2704 int64_t FieldSizeInBits = 2705 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2706 if (Field->isBitField()) { 2707 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2708 if (IsBitIntType) { 2709 if ((unsigned)BitfieldSize > 2710 cast<BitIntType>(Field->getType())->getNumBits()) 2711 return llvm::None; 2712 } else if (BitfieldSize > FieldSizeInBits) { 2713 return llvm::None; 2714 } 2715 FieldSizeInBits = BitfieldSize; 2716 } else if (IsBitIntType && 2717 !Context.hasUniqueObjectRepresentations(Field->getType())) { 2718 return llvm::None; 2719 } 2720 return FieldSizeInBits; 2721 } 2722 2723 static llvm::Optional<int64_t> 2724 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) { 2725 return structHasUniqueObjectRepresentations(Context, RD); 2726 } 2727 2728 template <typename RangeT> 2729 static llvm::Optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2730 const RangeT &Subobjects, int64_t CurOffsetInBits, 2731 const ASTContext &Context, const clang::ASTRecordLayout &Layout) { 2732 for (const auto *Subobject : Subobjects) { 2733 llvm::Optional<int64_t> SizeInBits = 2734 getSubobjectSizeInBits(Subobject, Context); 2735 if (!SizeInBits) 2736 return llvm::None; 2737 if (*SizeInBits != 0) { 2738 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2739 if (Offset != CurOffsetInBits) 2740 return llvm::None; 2741 CurOffsetInBits += *SizeInBits; 2742 } 2743 } 2744 return CurOffsetInBits; 2745 } 2746 2747 static llvm::Optional<int64_t> 2748 structHasUniqueObjectRepresentations(const ASTContext &Context, 2749 const RecordDecl *RD) { 2750 assert(!RD->isUnion() && "Must be struct/class type"); 2751 const auto &Layout = Context.getASTRecordLayout(RD); 2752 2753 int64_t CurOffsetInBits = 0; 2754 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2755 if (ClassDecl->isDynamicClass()) 2756 return llvm::None; 2757 2758 SmallVector<CXXRecordDecl *, 4> Bases; 2759 for (const auto &Base : ClassDecl->bases()) { 2760 // Empty types can be inherited from, and non-empty types can potentially 2761 // have tail padding, so just make sure there isn't an error. 
2762 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2763 } 2764 2765 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2766 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2767 }); 2768 2769 llvm::Optional<int64_t> OffsetAfterBases = 2770 structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits, 2771 Context, Layout); 2772 if (!OffsetAfterBases) 2773 return llvm::None; 2774 CurOffsetInBits = *OffsetAfterBases; 2775 } 2776 2777 llvm::Optional<int64_t> OffsetAfterFields = 2778 structSubobjectsHaveUniqueObjectRepresentations( 2779 RD->fields(), CurOffsetInBits, Context, Layout); 2780 if (!OffsetAfterFields) 2781 return llvm::None; 2782 CurOffsetInBits = *OffsetAfterFields; 2783 2784 return CurOffsetInBits; 2785 } 2786 2787 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { 2788 // C++17 [meta.unary.prop]: 2789 // The predicate condition for a template specialization 2790 // has_unique_object_representations<T> shall be 2791 // satisfied if and only if: 2792 // (9.1) - T is trivially copyable, and 2793 // (9.2) - any two objects of type T with the same value have the same 2794 // object representation, where two objects 2795 // of array or non-union class type are considered to have the same value 2796 // if their respective sequences of 2797 // direct subobjects have the same values, and two objects of union type 2798 // are considered to have the same 2799 // value if they have the same active member and the corresponding members 2800 // have the same value. 2801 // The set of scalar types for which this condition holds is 2802 // implementation-defined. [ Note: If a type has padding 2803 // bits, the condition does not hold; otherwise, the condition holds true 2804 // for unsigned integral types. -- end note ] 2805 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2806 2807 // Arrays are unique only if their element type is unique. 2808 if (Ty->isArrayType()) 2809 return hasUniqueObjectRepresentations(getBaseElementType(Ty)); 2810 2811 // (9.1) - T is trivially copyable... 2812 if (!Ty.isTriviallyCopyableType(*this)) 2813 return false; 2814 2815 // All integrals and enums are unique. 2816 if (Ty->isIntegralOrEnumerationType()) { 2817 // Except _BitInt types that have padding bits. 2818 if (const auto *BIT = dyn_cast<BitIntType>(Ty)) 2819 return getTypeSize(BIT) == BIT->getNumBits(); 2820 2821 return true; 2822 } 2823 2824 // All other pointers are unique. 
2825 if (Ty->isPointerType()) 2826 return true; 2827 2828 if (Ty->isMemberPointerType()) { 2829 const auto *MPT = Ty->getAs<MemberPointerType>(); 2830 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2831 } 2832 2833 if (Ty->isRecordType()) { 2834 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2835 2836 if (Record->isInvalidDecl()) 2837 return false; 2838 2839 if (Record->isUnion()) 2840 return unionHasUniqueObjectRepresentations(*this, Record); 2841 2842 Optional<int64_t> StructSize = 2843 structHasUniqueObjectRepresentations(*this, Record); 2844 2845 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2846 } 2847 2848 // FIXME: More cases to handle here (list by rsmith): 2849 // vectors (careful about, eg, vector of 3 foo) 2850 // _Complex int and friends 2851 // _Atomic T 2852 // Obj-C block pointers 2853 // Obj-C object pointers 2854 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2855 // clk_event_t, queue_t, reserve_id_t) 2856 // There're also Obj-C class types and the Obj-C selector type, but I think it 2857 // makes sense for those to return false here. 2858 2859 return false; 2860 } 2861 2862 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2863 unsigned count = 0; 2864 // Count ivars declared in class extension. 2865 for (const auto *Ext : OI->known_extensions()) 2866 count += Ext->ivar_size(); 2867 2868 // Count ivar defined in this class's implementation. This 2869 // includes synthesized ivars. 2870 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2871 count += ImplDecl->ivar_size(); 2872 2873 return count; 2874 } 2875 2876 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2877 if (!E) 2878 return false; 2879 2880 // nullptr_t is always treated as null. 2881 if (E->getType()->isNullPtrType()) return true; 2882 2883 if (E->getType()->isAnyPointerType() && 2884 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2885 Expr::NPC_ValueDependentIsNull)) 2886 return true; 2887 2888 // Unfortunately, __null has type 'int'. 2889 if (isa<GNUNullExpr>(E)) return true; 2890 2891 return false; 2892 } 2893 2894 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2895 /// exists. 2896 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2897 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2898 I = ObjCImpls.find(D); 2899 if (I != ObjCImpls.end()) 2900 return cast<ObjCImplementationDecl>(I->second); 2901 return nullptr; 2902 } 2903 2904 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2905 /// exists. 2906 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2907 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2908 I = ObjCImpls.find(D); 2909 if (I != ObjCImpls.end()) 2910 return cast<ObjCCategoryImplDecl>(I->second); 2911 return nullptr; 2912 } 2913 2914 /// Set the implementation of ObjCInterfaceDecl. 2915 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2916 ObjCImplementationDecl *ImplD) { 2917 assert(IFaceD && ImplD && "Passed null params"); 2918 ObjCImpls[IFaceD] = ImplD; 2919 } 2920 2921 /// Set the implementation of ObjCCategoryDecl. 
2922 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2923 ObjCCategoryImplDecl *ImplD) { 2924 assert(CatD && ImplD && "Passed null params"); 2925 ObjCImpls[CatD] = ImplD; 2926 } 2927 2928 const ObjCMethodDecl * 2929 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2930 return ObjCMethodRedecls.lookup(MD); 2931 } 2932 2933 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2934 const ObjCMethodDecl *Redecl) { 2935 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2936 ObjCMethodRedecls[MD] = Redecl; 2937 } 2938 2939 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2940 const NamedDecl *ND) const { 2941 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2942 return ID; 2943 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2944 return CD->getClassInterface(); 2945 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2946 return IMD->getClassInterface(); 2947 2948 return nullptr; 2949 } 2950 2951 /// Get the copy initialization expression of VarDecl, or nullptr if 2952 /// none exists. 2953 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2954 assert(VD && "Passed null params"); 2955 assert(VD->hasAttr<BlocksAttr>() && 2956 "getBlockVarCopyInits - not __block var"); 2957 auto I = BlockVarCopyInits.find(VD); 2958 if (I != BlockVarCopyInits.end()) 2959 return I->second; 2960 return {nullptr, false}; 2961 } 2962 2963 /// Set the copy initialization expression of a block var decl. 2964 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2965 bool CanThrow) { 2966 assert(VD && CopyExpr && "Passed null params"); 2967 assert(VD->hasAttr<BlocksAttr>() && 2968 "setBlockVarCopyInits - not __block var"); 2969 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2970 } 2971 2972 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2973 unsigned DataSize) const { 2974 if (!DataSize) 2975 DataSize = TypeLoc::getFullDataSizeForType(T); 2976 else 2977 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2978 "incorrect data size provided to CreateTypeSourceInfo!"); 2979 2980 auto *TInfo = 2981 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2982 new (TInfo) TypeSourceInfo(T); 2983 return TInfo; 2984 } 2985 2986 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2987 SourceLocation L) const { 2988 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2989 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2990 return DI; 2991 } 2992 2993 const ASTRecordLayout & 2994 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2995 return getObjCLayout(D, nullptr); 2996 } 2997 2998 const ASTRecordLayout & 2999 ASTContext::getASTObjCImplementationLayout( 3000 const ObjCImplementationDecl *D) const { 3001 return getObjCLayout(D->getClassInterface(), D); 3002 } 3003 3004 //===----------------------------------------------------------------------===// 3005 // Type creation/memoization methods 3006 //===----------------------------------------------------------------------===// 3007 3008 QualType 3009 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 3010 unsigned fastQuals = quals.getFastQualifiers(); 3011 quals.removeFastQualifiers(); 3012 3013 // Check if we've already instantiated this type. 
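  // (This is the same profile-and-lookup pattern used by most get*Type
  // methods below: build a FoldingSetNodeID, probe the FoldingSet, and only
  // allocate a new node on a miss.)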
3014 llvm::FoldingSetNodeID ID; 3015 ExtQuals::Profile(ID, baseType, quals); 3016 void *insertPos = nullptr; 3017 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 3018 assert(eq->getQualifiers() == quals); 3019 return QualType(eq, fastQuals); 3020 } 3021 3022 // If the base type is not canonical, make the appropriate canonical type. 3023 QualType canon; 3024 if (!baseType->isCanonicalUnqualified()) { 3025 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 3026 canonSplit.Quals.addConsistentQualifiers(quals); 3027 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3028 3029 // Re-find the insert position. 3030 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3031 } 3032 3033 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 3034 ExtQualNodes.InsertNode(eq, insertPos); 3035 return QualType(eq, fastQuals); 3036 } 3037 3038 QualType ASTContext::getAddrSpaceQualType(QualType T, 3039 LangAS AddressSpace) const { 3040 QualType CanT = getCanonicalType(T); 3041 if (CanT.getAddressSpace() == AddressSpace) 3042 return T; 3043 3044 // If we are composing extended qualifiers together, merge together 3045 // into one ExtQuals node. 3046 QualifierCollector Quals; 3047 const Type *TypeNode = Quals.strip(T); 3048 3049 // If this type already has an address space specified, it cannot get 3050 // another one. 3051 assert(!Quals.hasAddressSpace() && 3052 "Type cannot be in multiple addr spaces!"); 3053 Quals.addAddressSpace(AddressSpace); 3054 3055 return getExtQualType(TypeNode, Quals); 3056 } 3057 3058 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3059 // If the type is not qualified with an address space, just return it 3060 // immediately. 3061 if (!T.hasAddressSpace()) 3062 return T; 3063 3064 // If we are composing extended qualifiers together, merge together 3065 // into one ExtQuals node. 3066 QualifierCollector Quals; 3067 const Type *TypeNode; 3068 3069 while (T.hasAddressSpace()) { 3070 TypeNode = Quals.strip(T); 3071 3072 // If the type no longer has an address space after stripping qualifiers, 3073 // jump out. 3074 if (!QualType(TypeNode, 0).hasAddressSpace()) 3075 break; 3076 3077 // There might be sugar in the way. Strip it and try again. 3078 T = T.getSingleStepDesugaredType(*this); 3079 } 3080 3081 Quals.removeAddressSpace(); 3082 3083 // Removal of the address space can mean there are no longer any 3084 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3085 // or required. 3086 if (Quals.hasNonFastQualifiers()) 3087 return getExtQualType(TypeNode, Quals); 3088 else 3089 return QualType(TypeNode, Quals.getFastQualifiers()); 3090 } 3091 3092 QualType ASTContext::getObjCGCQualType(QualType T, 3093 Qualifiers::GC GCAttr) const { 3094 QualType CanT = getCanonicalType(T); 3095 if (CanT.getObjCGCAttr() == GCAttr) 3096 return T; 3097 3098 if (const auto *ptr = T->getAs<PointerType>()) { 3099 QualType Pointee = ptr->getPointeeType(); 3100 if (Pointee->isAnyPointerType()) { 3101 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3102 return getPointerType(ResultType); 3103 } 3104 } 3105 3106 // If we are composing extended qualifiers together, merge together 3107 // into one ExtQuals node. 3108 QualifierCollector Quals; 3109 const Type *TypeNode = Quals.strip(T); 3110 3111 // If this type already has an ObjCGC specified, it cannot get 3112 // another one. 
3113 assert(!Quals.hasObjCGCAttr() && 3114 "Type cannot have multiple ObjCGCs!"); 3115 Quals.addObjCGCAttr(GCAttr); 3116 3117 return getExtQualType(TypeNode, Quals); 3118 } 3119 3120 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3121 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3122 QualType Pointee = Ptr->getPointeeType(); 3123 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3124 return getPointerType(removeAddrSpaceQualType(Pointee)); 3125 } 3126 } 3127 return T; 3128 } 3129 3130 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3131 FunctionType::ExtInfo Info) { 3132 if (T->getExtInfo() == Info) 3133 return T; 3134 3135 QualType Result; 3136 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3137 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3138 } else { 3139 const auto *FPT = cast<FunctionProtoType>(T); 3140 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3141 EPI.ExtInfo = Info; 3142 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3143 } 3144 3145 return cast<FunctionType>(Result.getTypePtr()); 3146 } 3147 3148 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3149 QualType ResultType) { 3150 FD = FD->getMostRecentDecl(); 3151 while (true) { 3152 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3153 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3154 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3155 if (FunctionDecl *Next = FD->getPreviousDecl()) 3156 FD = Next; 3157 else 3158 break; 3159 } 3160 if (ASTMutationListener *L = getASTMutationListener()) 3161 L->DeducedReturnType(FD, ResultType); 3162 } 3163 3164 /// Get a function type and produce the equivalent function type with the 3165 /// specified exception specification. Type sugar that can be present on a 3166 /// declaration of a function with an exception specification is permitted 3167 /// and preserved. Other type sugar (for instance, typedefs) is not. 3168 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3169 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3170 // Might have some parens. 3171 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3172 return getParenType( 3173 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3174 3175 // Might be wrapped in a macro qualified type. 3176 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3177 return getMacroQualifiedType( 3178 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3179 MQT->getMacroIdentifier()); 3180 3181 // Might have a calling-convention attribute. 3182 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3183 return getAttributedType( 3184 AT->getAttrKind(), 3185 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3186 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3187 3188 // Anything else must be a function type. Rebuild it with the new exception 3189 // specification. 
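  // (Illustrative: rebuilding 'void () noexcept' with EST_None yields
  // 'void ()'; any wrapping parens, macro qualifiers, or attributes were
  // already handled by the recursive cases above.)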
  const auto *Proto = Orig->castAs<FunctionProtoType>();
  return getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}

bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
                                                          QualType U) const {
  return hasSameType(T, U) ||
         (getLangOpts().CPlusPlus17 &&
          hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
                      getFunctionTypeWithExceptionSpec(U, EST_None)));
}

QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
  if (const auto *Proto = T->getAs<FunctionProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    SmallVector<QualType, 16> Args(Proto->param_types());
    for (unsigned i = 0, n = Args.size(); i != n; ++i)
      Args[i] = removePtrSizeAddrSpace(Args[i]);
    return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
  }

  if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
  }

  return T;
}

bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
  return hasSameType(T, U) ||
         hasSameType(getFunctionTypeWithoutPtrSizes(T),
                     getFunctionTypeWithoutPtrSizes(U));
}

void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo.
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}

/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique complex types, to guarantee there is only one complex type of a
  // particular structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
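    // Editor's note (illustrative, not part of the original source): the
    // recursive getComplexType call above may have inserted other nodes into
    // the ComplexTypes folding set, which invalidates InsertPos, so it is
    // recomputed here; the result is asserted to be null because the
    // canonical node profiles differently from the sugared node being built.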
3274 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3275 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3276 } 3277 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3278 Types.push_back(New); 3279 ComplexTypes.InsertNode(New, InsertPos); 3280 return QualType(New, 0); 3281 } 3282 3283 /// getPointerType - Return the uniqued reference to the type for a pointer to 3284 /// the specified type. 3285 QualType ASTContext::getPointerType(QualType T) const { 3286 // Unique pointers, to guarantee there is only one pointer of a particular 3287 // structure. 3288 llvm::FoldingSetNodeID ID; 3289 PointerType::Profile(ID, T); 3290 3291 void *InsertPos = nullptr; 3292 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3293 return QualType(PT, 0); 3294 3295 // If the pointee type isn't canonical, this won't be a canonical type either, 3296 // so fill in the canonical type field. 3297 QualType Canonical; 3298 if (!T.isCanonical()) { 3299 Canonical = getPointerType(getCanonicalType(T)); 3300 3301 // Get the new insert position for the node we care about. 3302 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3303 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3304 } 3305 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3306 Types.push_back(New); 3307 PointerTypes.InsertNode(New, InsertPos); 3308 return QualType(New, 0); 3309 } 3310 3311 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3312 llvm::FoldingSetNodeID ID; 3313 AdjustedType::Profile(ID, Orig, New); 3314 void *InsertPos = nullptr; 3315 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3316 if (AT) 3317 return QualType(AT, 0); 3318 3319 QualType Canonical = getCanonicalType(New); 3320 3321 // Get the new insert position for the node we care about. 3322 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3323 assert(!AT && "Shouldn't be in the map!"); 3324 3325 AT = new (*this, TypeAlignment) 3326 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3327 Types.push_back(AT); 3328 AdjustedTypes.InsertNode(AT, InsertPos); 3329 return QualType(AT, 0); 3330 } 3331 3332 QualType ASTContext::getDecayedType(QualType T) const { 3333 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3334 3335 QualType Decayed; 3336 3337 // C99 6.7.5.3p7: 3338 // A declaration of a parameter as "array of type" shall be 3339 // adjusted to "qualified pointer to type", where the type 3340 // qualifiers (if any) are those specified within the [ and ] of 3341 // the array type derivation. 3342 if (T->isArrayType()) 3343 Decayed = getArrayDecayedType(T); 3344 3345 // C99 6.7.5.3p8: 3346 // A declaration of a parameter as "function returning type" 3347 // shall be adjusted to "pointer to function returning type", as 3348 // in 6.3.2.1. 3349 if (T->isFunctionType()) 3350 Decayed = getPointerType(T); 3351 3352 llvm::FoldingSetNodeID ID; 3353 AdjustedType::Profile(ID, T, Decayed); 3354 void *InsertPos = nullptr; 3355 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3356 if (AT) 3357 return QualType(AT, 0); 3358 3359 QualType Canonical = getCanonicalType(Decayed); 3360 3361 // Get the new insert position for the node we care about. 
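  // Editor's note (illustrative, not part of the original source): DecayedType
  // derives from AdjustedType and profiles the same (original, adjusted) pair
  // of types, so decayed types are uniqued through the same AdjustedTypes
  // folding set used by getAdjustedType above.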
3362 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3363 assert(!AT && "Shouldn't be in the map!"); 3364 3365 AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); 3366 Types.push_back(AT); 3367 AdjustedTypes.InsertNode(AT, InsertPos); 3368 return QualType(AT, 0); 3369 } 3370 3371 /// getBlockPointerType - Return the uniqued reference to the type for 3372 /// a pointer to the specified block. 3373 QualType ASTContext::getBlockPointerType(QualType T) const { 3374 assert(T->isFunctionType() && "block of function types only"); 3375 // Unique pointers, to guarantee there is only one block of a particular 3376 // structure. 3377 llvm::FoldingSetNodeID ID; 3378 BlockPointerType::Profile(ID, T); 3379 3380 void *InsertPos = nullptr; 3381 if (BlockPointerType *PT = 3382 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3383 return QualType(PT, 0); 3384 3385 // If the block pointee type isn't canonical, this won't be a canonical 3386 // type either so fill in the canonical type field. 3387 QualType Canonical; 3388 if (!T.isCanonical()) { 3389 Canonical = getBlockPointerType(getCanonicalType(T)); 3390 3391 // Get the new insert position for the node we care about. 3392 BlockPointerType *NewIP = 3393 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3394 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3395 } 3396 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3397 Types.push_back(New); 3398 BlockPointerTypes.InsertNode(New, InsertPos); 3399 return QualType(New, 0); 3400 } 3401 3402 /// getLValueReferenceType - Return the uniqued reference to the type for an 3403 /// lvalue reference to the specified type. 3404 QualType 3405 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3406 assert((!T->isPlaceholderType() || 3407 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3408 "Unresolved placeholder type"); 3409 3410 // Unique pointers, to guarantee there is only one pointer of a particular 3411 // structure. 3412 llvm::FoldingSetNodeID ID; 3413 ReferenceType::Profile(ID, T, SpelledAsLValue); 3414 3415 void *InsertPos = nullptr; 3416 if (LValueReferenceType *RT = 3417 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3418 return QualType(RT, 0); 3419 3420 const auto *InnerRef = T->getAs<ReferenceType>(); 3421 3422 // If the referencee type isn't canonical, this won't be a canonical type 3423 // either, so fill in the canonical type field. 3424 QualType Canonical; 3425 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3426 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3427 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3428 3429 // Get the new insert position for the node we care about. 3430 LValueReferenceType *NewIP = 3431 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3432 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3433 } 3434 3435 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 3436 SpelledAsLValue); 3437 Types.push_back(New); 3438 LValueReferenceTypes.InsertNode(New, InsertPos); 3439 3440 return QualType(New, 0); 3441 } 3442 3443 /// getRValueReferenceType - Return the uniqued reference to the type for an 3444 /// rvalue reference to the specified type. 
3445 QualType ASTContext::getRValueReferenceType(QualType T) const { 3446 assert((!T->isPlaceholderType() || 3447 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3448 "Unresolved placeholder type"); 3449 3450 // Unique pointers, to guarantee there is only one pointer of a particular 3451 // structure. 3452 llvm::FoldingSetNodeID ID; 3453 ReferenceType::Profile(ID, T, false); 3454 3455 void *InsertPos = nullptr; 3456 if (RValueReferenceType *RT = 3457 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3458 return QualType(RT, 0); 3459 3460 const auto *InnerRef = T->getAs<ReferenceType>(); 3461 3462 // If the referencee type isn't canonical, this won't be a canonical type 3463 // either, so fill in the canonical type field. 3464 QualType Canonical; 3465 if (InnerRef || !T.isCanonical()) { 3466 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3467 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3468 3469 // Get the new insert position for the node we care about. 3470 RValueReferenceType *NewIP = 3471 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3472 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3473 } 3474 3475 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 3476 Types.push_back(New); 3477 RValueReferenceTypes.InsertNode(New, InsertPos); 3478 return QualType(New, 0); 3479 } 3480 3481 /// getMemberPointerType - Return the uniqued reference to the type for a 3482 /// member pointer to the specified type, in the specified class. 3483 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3484 // Unique pointers, to guarantee there is only one pointer of a particular 3485 // structure. 3486 llvm::FoldingSetNodeID ID; 3487 MemberPointerType::Profile(ID, T, Cls); 3488 3489 void *InsertPos = nullptr; 3490 if (MemberPointerType *PT = 3491 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3492 return QualType(PT, 0); 3493 3494 // If the pointee or class type isn't canonical, this won't be a canonical 3495 // type either, so fill in the canonical type field. 3496 QualType Canonical; 3497 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3498 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3499 3500 // Get the new insert position for the node we care about. 3501 MemberPointerType *NewIP = 3502 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3503 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3504 } 3505 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 3506 Types.push_back(New); 3507 MemberPointerTypes.InsertNode(New, InsertPos); 3508 return QualType(New, 0); 3509 } 3510 3511 /// getConstantArrayType - Return the unique reference to the type for an 3512 /// array of the specified element type. 3513 QualType ASTContext::getConstantArrayType(QualType EltTy, 3514 const llvm::APInt &ArySizeIn, 3515 const Expr *SizeExpr, 3516 ArrayType::ArraySizeModifier ASM, 3517 unsigned IndexTypeQuals) const { 3518 assert((EltTy->isDependentType() || 3519 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3520 "Constant array of VLAs is illegal!"); 3521 3522 // We only need the size as part of the type if it's instantiation-dependent. 3523 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3524 SizeExpr = nullptr; 3525 3526 // Convert the array size into a canonical width matching the pointer size for 3527 // the target. 
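  // Editor's note (illustrative, not part of the original source): the written
  // bound is widened or truncated to the target's maximum pointer width before
  // profiling, so on a 64-bit target a request such as
  //
  //   Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 3), nullptr,
  //                            ArrayType::Normal, 0)
  //
  // (with Ctx an ASTContext&) and the same request spelled with
  // llvm::APInt(64, 3) unique to the same node.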
3528 llvm::APInt ArySize(ArySizeIn); 3529 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3530 3531 llvm::FoldingSetNodeID ID; 3532 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3533 IndexTypeQuals); 3534 3535 void *InsertPos = nullptr; 3536 if (ConstantArrayType *ATP = 3537 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3538 return QualType(ATP, 0); 3539 3540 // If the element type isn't canonical or has qualifiers, or the array bound 3541 // is instantiation-dependent, this won't be a canonical type either, so fill 3542 // in the canonical type field. 3543 QualType Canon; 3544 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3545 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3546 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3547 ASM, IndexTypeQuals); 3548 Canon = getQualifiedType(Canon, canonSplit.Quals); 3549 3550 // Get the new insert position for the node we care about. 3551 ConstantArrayType *NewIP = 3552 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3553 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3554 } 3555 3556 void *Mem = Allocate( 3557 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3558 TypeAlignment); 3559 auto *New = new (Mem) 3560 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3561 ConstantArrayTypes.InsertNode(New, InsertPos); 3562 Types.push_back(New); 3563 return QualType(New, 0); 3564 } 3565 3566 /// getVariableArrayDecayedType - Turns the given type, which may be 3567 /// variably-modified, into the corresponding type with all the known 3568 /// sizes replaced with [*]. 3569 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3570 // Vastly most common case. 3571 if (!type->isVariablyModifiedType()) return type; 3572 3573 QualType result; 3574 3575 SplitQualType split = type.getSplitDesugaredType(); 3576 const Type *ty = split.Ty; 3577 switch (ty->getTypeClass()) { 3578 #define TYPE(Class, Base) 3579 #define ABSTRACT_TYPE(Class, Base) 3580 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3581 #include "clang/AST/TypeNodes.inc" 3582 llvm_unreachable("didn't desugar past all non-canonical types?"); 3583 3584 // These types should never be variably-modified. 3585 case Type::Builtin: 3586 case Type::Complex: 3587 case Type::Vector: 3588 case Type::DependentVector: 3589 case Type::ExtVector: 3590 case Type::DependentSizedExtVector: 3591 case Type::ConstantMatrix: 3592 case Type::DependentSizedMatrix: 3593 case Type::DependentAddressSpace: 3594 case Type::ObjCObject: 3595 case Type::ObjCInterface: 3596 case Type::ObjCObjectPointer: 3597 case Type::Record: 3598 case Type::Enum: 3599 case Type::UnresolvedUsing: 3600 case Type::TypeOfExpr: 3601 case Type::TypeOf: 3602 case Type::Decltype: 3603 case Type::UnaryTransform: 3604 case Type::DependentName: 3605 case Type::InjectedClassName: 3606 case Type::TemplateSpecialization: 3607 case Type::DependentTemplateSpecialization: 3608 case Type::TemplateTypeParm: 3609 case Type::SubstTemplateTypeParmPack: 3610 case Type::Auto: 3611 case Type::DeducedTemplateSpecialization: 3612 case Type::PackExpansion: 3613 case Type::BitInt: 3614 case Type::DependentBitInt: 3615 llvm_unreachable("type should never be variably-modified"); 3616 3617 // These types can be variably-modified but should never need to 3618 // further decay. 
3619 case Type::FunctionNoProto: 3620 case Type::FunctionProto: 3621 case Type::BlockPointer: 3622 case Type::MemberPointer: 3623 case Type::Pipe: 3624 return type; 3625 3626 // These types can be variably-modified. All these modifications 3627 // preserve structure except as noted by comments. 3628 // TODO: if we ever care about optimizing VLAs, there are no-op 3629 // optimizations available here. 3630 case Type::Pointer: 3631 result = getPointerType(getVariableArrayDecayedType( 3632 cast<PointerType>(ty)->getPointeeType())); 3633 break; 3634 3635 case Type::LValueReference: { 3636 const auto *lv = cast<LValueReferenceType>(ty); 3637 result = getLValueReferenceType( 3638 getVariableArrayDecayedType(lv->getPointeeType()), 3639 lv->isSpelledAsLValue()); 3640 break; 3641 } 3642 3643 case Type::RValueReference: { 3644 const auto *lv = cast<RValueReferenceType>(ty); 3645 result = getRValueReferenceType( 3646 getVariableArrayDecayedType(lv->getPointeeType())); 3647 break; 3648 } 3649 3650 case Type::Atomic: { 3651 const auto *at = cast<AtomicType>(ty); 3652 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3653 break; 3654 } 3655 3656 case Type::ConstantArray: { 3657 const auto *cat = cast<ConstantArrayType>(ty); 3658 result = getConstantArrayType( 3659 getVariableArrayDecayedType(cat->getElementType()), 3660 cat->getSize(), 3661 cat->getSizeExpr(), 3662 cat->getSizeModifier(), 3663 cat->getIndexTypeCVRQualifiers()); 3664 break; 3665 } 3666 3667 case Type::DependentSizedArray: { 3668 const auto *dat = cast<DependentSizedArrayType>(ty); 3669 result = getDependentSizedArrayType( 3670 getVariableArrayDecayedType(dat->getElementType()), 3671 dat->getSizeExpr(), 3672 dat->getSizeModifier(), 3673 dat->getIndexTypeCVRQualifiers(), 3674 dat->getBracketsRange()); 3675 break; 3676 } 3677 3678 // Turn incomplete types into [*] types. 3679 case Type::IncompleteArray: { 3680 const auto *iat = cast<IncompleteArrayType>(ty); 3681 result = getVariableArrayType( 3682 getVariableArrayDecayedType(iat->getElementType()), 3683 /*size*/ nullptr, 3684 ArrayType::Normal, 3685 iat->getIndexTypeCVRQualifiers(), 3686 SourceRange()); 3687 break; 3688 } 3689 3690 // Turn VLA types into [*] types. 3691 case Type::VariableArray: { 3692 const auto *vat = cast<VariableArrayType>(ty); 3693 result = getVariableArrayType( 3694 getVariableArrayDecayedType(vat->getElementType()), 3695 /*size*/ nullptr, 3696 ArrayType::Star, 3697 vat->getIndexTypeCVRQualifiers(), 3698 vat->getBracketsRange()); 3699 break; 3700 } 3701 } 3702 3703 // Apply the top-level qualifiers from the original. 3704 return getQualifiedType(result, split.Quals); 3705 } 3706 3707 /// getVariableArrayType - Returns a non-unique reference to the type for a 3708 /// variable array of the specified element type. 3709 QualType ASTContext::getVariableArrayType(QualType EltTy, 3710 Expr *NumElts, 3711 ArrayType::ArraySizeModifier ASM, 3712 unsigned IndexTypeQuals, 3713 SourceRange Brackets) const { 3714 // Since we don't unique expressions, it isn't possible to unique VLA's 3715 // that have an expression provided for their size. 3716 QualType Canon; 3717 3718 // Be sure to pull qualifiers off the element type. 
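  // Editor's note (illustrative, not part of the original source): because the
  // size expression is not uniqued, every call builds a fresh
  // VariableArrayType node; only the element type is canonicalized below, with
  // its local qualifiers hoisted onto the array as canonical array types
  // require.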
3719 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3720 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3721 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3722 IndexTypeQuals, Brackets); 3723 Canon = getQualifiedType(Canon, canonSplit.Quals); 3724 } 3725 3726 auto *New = new (*this, TypeAlignment) 3727 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3728 3729 VariableArrayTypes.push_back(New); 3730 Types.push_back(New); 3731 return QualType(New, 0); 3732 } 3733 3734 /// getDependentSizedArrayType - Returns a non-unique reference to 3735 /// the type for a dependently-sized array of the specified element 3736 /// type. 3737 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3738 Expr *numElements, 3739 ArrayType::ArraySizeModifier ASM, 3740 unsigned elementTypeQuals, 3741 SourceRange brackets) const { 3742 assert((!numElements || numElements->isTypeDependent() || 3743 numElements->isValueDependent()) && 3744 "Size must be type- or value-dependent!"); 3745 3746 // Dependently-sized array types that do not have a specified number 3747 // of elements will have their sizes deduced from a dependent 3748 // initializer. We do no canonicalization here at all, which is okay 3749 // because they can't be used in most locations. 3750 if (!numElements) { 3751 auto *newType 3752 = new (*this, TypeAlignment) 3753 DependentSizedArrayType(*this, elementType, QualType(), 3754 numElements, ASM, elementTypeQuals, 3755 brackets); 3756 Types.push_back(newType); 3757 return QualType(newType, 0); 3758 } 3759 3760 // Otherwise, we actually build a new type every time, but we 3761 // also build a canonical type. 3762 3763 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3764 3765 void *insertPos = nullptr; 3766 llvm::FoldingSetNodeID ID; 3767 DependentSizedArrayType::Profile(ID, *this, 3768 QualType(canonElementType.Ty, 0), 3769 ASM, elementTypeQuals, numElements); 3770 3771 // Look for an existing type with these properties. 3772 DependentSizedArrayType *canonTy = 3773 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3774 3775 // If we don't have one, build one. 3776 if (!canonTy) { 3777 canonTy = new (*this, TypeAlignment) 3778 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 3779 QualType(), numElements, ASM, elementTypeQuals, 3780 brackets); 3781 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3782 Types.push_back(canonTy); 3783 } 3784 3785 // Apply qualifiers from the element type to the array. 3786 QualType canon = getQualifiedType(QualType(canonTy,0), 3787 canonElementType.Quals); 3788 3789 // If we didn't need extra canonicalization for the element type or the size 3790 // expression, then just use that as our result. 3791 if (QualType(canonElementType.Ty, 0) == elementType && 3792 canonTy->getSizeExpr() == numElements) 3793 return canon; 3794 3795 // Otherwise, we need to build a type which follows the spelling 3796 // of the element type. 
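  // Editor's note (illustrative, not part of the original source): the node
  // built below is pure type sugar recording the as-written element type; it
  // points at 'canon' for identity comparisons but is deliberately not
  // inserted into the DependentSizedArrayTypes folding set.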
3797 auto *sugaredType 3798 = new (*this, TypeAlignment) 3799 DependentSizedArrayType(*this, elementType, canon, numElements, 3800 ASM, elementTypeQuals, brackets); 3801 Types.push_back(sugaredType); 3802 return QualType(sugaredType, 0); 3803 } 3804 3805 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3806 ArrayType::ArraySizeModifier ASM, 3807 unsigned elementTypeQuals) const { 3808 llvm::FoldingSetNodeID ID; 3809 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3810 3811 void *insertPos = nullptr; 3812 if (IncompleteArrayType *iat = 3813 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3814 return QualType(iat, 0); 3815 3816 // If the element type isn't canonical, this won't be a canonical type 3817 // either, so fill in the canonical type field. We also have to pull 3818 // qualifiers off the element type. 3819 QualType canon; 3820 3821 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3822 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3823 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3824 ASM, elementTypeQuals); 3825 canon = getQualifiedType(canon, canonSplit.Quals); 3826 3827 // Get the new insert position for the node we care about. 3828 IncompleteArrayType *existing = 3829 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3830 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3831 } 3832 3833 auto *newType = new (*this, TypeAlignment) 3834 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3835 3836 IncompleteArrayTypes.InsertNode(newType, insertPos); 3837 Types.push_back(newType); 3838 return QualType(newType, 0); 3839 } 3840 3841 ASTContext::BuiltinVectorTypeInfo 3842 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3843 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3844 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3845 NUMVECTORS}; 3846 3847 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3848 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3849 3850 switch (Ty->getKind()) { 3851 default: 3852 llvm_unreachable("Unsupported builtin vector type"); 3853 case BuiltinType::SveInt8: 3854 return SVE_INT_ELTTY(8, 16, true, 1); 3855 case BuiltinType::SveUint8: 3856 return SVE_INT_ELTTY(8, 16, false, 1); 3857 case BuiltinType::SveInt8x2: 3858 return SVE_INT_ELTTY(8, 16, true, 2); 3859 case BuiltinType::SveUint8x2: 3860 return SVE_INT_ELTTY(8, 16, false, 2); 3861 case BuiltinType::SveInt8x3: 3862 return SVE_INT_ELTTY(8, 16, true, 3); 3863 case BuiltinType::SveUint8x3: 3864 return SVE_INT_ELTTY(8, 16, false, 3); 3865 case BuiltinType::SveInt8x4: 3866 return SVE_INT_ELTTY(8, 16, true, 4); 3867 case BuiltinType::SveUint8x4: 3868 return SVE_INT_ELTTY(8, 16, false, 4); 3869 case BuiltinType::SveInt16: 3870 return SVE_INT_ELTTY(16, 8, true, 1); 3871 case BuiltinType::SveUint16: 3872 return SVE_INT_ELTTY(16, 8, false, 1); 3873 case BuiltinType::SveInt16x2: 3874 return SVE_INT_ELTTY(16, 8, true, 2); 3875 case BuiltinType::SveUint16x2: 3876 return SVE_INT_ELTTY(16, 8, false, 2); 3877 case BuiltinType::SveInt16x3: 3878 return SVE_INT_ELTTY(16, 8, true, 3); 3879 case BuiltinType::SveUint16x3: 3880 return SVE_INT_ELTTY(16, 8, false, 3); 3881 case BuiltinType::SveInt16x4: 3882 return SVE_INT_ELTTY(16, 8, true, 4); 3883 case BuiltinType::SveUint16x4: 3884 return SVE_INT_ELTTY(16, 8, false, 4); 3885 case BuiltinType::SveInt32: 3886 return SVE_INT_ELTTY(32, 4, true, 1); 3887 case 
BuiltinType::SveUint32: 3888 return SVE_INT_ELTTY(32, 4, false, 1); 3889 case BuiltinType::SveInt32x2: 3890 return SVE_INT_ELTTY(32, 4, true, 2); 3891 case BuiltinType::SveUint32x2: 3892 return SVE_INT_ELTTY(32, 4, false, 2); 3893 case BuiltinType::SveInt32x3: 3894 return SVE_INT_ELTTY(32, 4, true, 3); 3895 case BuiltinType::SveUint32x3: 3896 return SVE_INT_ELTTY(32, 4, false, 3); 3897 case BuiltinType::SveInt32x4: 3898 return SVE_INT_ELTTY(32, 4, true, 4); 3899 case BuiltinType::SveUint32x4: 3900 return SVE_INT_ELTTY(32, 4, false, 4); 3901 case BuiltinType::SveInt64: 3902 return SVE_INT_ELTTY(64, 2, true, 1); 3903 case BuiltinType::SveUint64: 3904 return SVE_INT_ELTTY(64, 2, false, 1); 3905 case BuiltinType::SveInt64x2: 3906 return SVE_INT_ELTTY(64, 2, true, 2); 3907 case BuiltinType::SveUint64x2: 3908 return SVE_INT_ELTTY(64, 2, false, 2); 3909 case BuiltinType::SveInt64x3: 3910 return SVE_INT_ELTTY(64, 2, true, 3); 3911 case BuiltinType::SveUint64x3: 3912 return SVE_INT_ELTTY(64, 2, false, 3); 3913 case BuiltinType::SveInt64x4: 3914 return SVE_INT_ELTTY(64, 2, true, 4); 3915 case BuiltinType::SveUint64x4: 3916 return SVE_INT_ELTTY(64, 2, false, 4); 3917 case BuiltinType::SveBool: 3918 return SVE_ELTTY(BoolTy, 16, 1); 3919 case BuiltinType::SveFloat16: 3920 return SVE_ELTTY(HalfTy, 8, 1); 3921 case BuiltinType::SveFloat16x2: 3922 return SVE_ELTTY(HalfTy, 8, 2); 3923 case BuiltinType::SveFloat16x3: 3924 return SVE_ELTTY(HalfTy, 8, 3); 3925 case BuiltinType::SveFloat16x4: 3926 return SVE_ELTTY(HalfTy, 8, 4); 3927 case BuiltinType::SveFloat32: 3928 return SVE_ELTTY(FloatTy, 4, 1); 3929 case BuiltinType::SveFloat32x2: 3930 return SVE_ELTTY(FloatTy, 4, 2); 3931 case BuiltinType::SveFloat32x3: 3932 return SVE_ELTTY(FloatTy, 4, 3); 3933 case BuiltinType::SveFloat32x4: 3934 return SVE_ELTTY(FloatTy, 4, 4); 3935 case BuiltinType::SveFloat64: 3936 return SVE_ELTTY(DoubleTy, 2, 1); 3937 case BuiltinType::SveFloat64x2: 3938 return SVE_ELTTY(DoubleTy, 2, 2); 3939 case BuiltinType::SveFloat64x3: 3940 return SVE_ELTTY(DoubleTy, 2, 3); 3941 case BuiltinType::SveFloat64x4: 3942 return SVE_ELTTY(DoubleTy, 2, 4); 3943 case BuiltinType::SveBFloat16: 3944 return SVE_ELTTY(BFloat16Ty, 8, 1); 3945 case BuiltinType::SveBFloat16x2: 3946 return SVE_ELTTY(BFloat16Ty, 8, 2); 3947 case BuiltinType::SveBFloat16x3: 3948 return SVE_ELTTY(BFloat16Ty, 8, 3); 3949 case BuiltinType::SveBFloat16x4: 3950 return SVE_ELTTY(BFloat16Ty, 8, 4); 3951 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3952 IsSigned) \ 3953 case BuiltinType::Id: \ 3954 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3955 llvm::ElementCount::getScalable(NumEls), NF}; 3956 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3957 case BuiltinType::Id: \ 3958 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3959 llvm::ElementCount::getScalable(NumEls), NF}; 3960 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3961 case BuiltinType::Id: \ 3962 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3963 #include "clang/Basic/RISCVVTypes.def" 3964 } 3965 } 3966 3967 /// getScalableVectorType - Return the unique reference to a scalable vector 3968 /// type of the specified element type and size. VectorType must be a built-in 3969 /// type. 
3970 QualType ASTContext::getScalableVectorType(QualType EltTy, 3971 unsigned NumElts) const { 3972 if (Target->hasAArch64SVETypes()) { 3973 uint64_t EltTySize = getTypeSize(EltTy); 3974 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3975 IsSigned, IsFP, IsBF) \ 3976 if (!EltTy->isBooleanType() && \ 3977 ((EltTy->hasIntegerRepresentation() && \ 3978 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3979 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3980 IsFP && !IsBF) || \ 3981 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3982 IsBF && !IsFP)) && \ 3983 EltTySize == ElBits && NumElts == NumEls) { \ 3984 return SingletonId; \ 3985 } 3986 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3987 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3988 return SingletonId; 3989 #include "clang/Basic/AArch64SVEACLETypes.def" 3990 } else if (Target->hasRISCVVTypes()) { 3991 uint64_t EltTySize = getTypeSize(EltTy); 3992 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3993 IsFP) \ 3994 if (!EltTy->isBooleanType() && \ 3995 ((EltTy->hasIntegerRepresentation() && \ 3996 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3997 (EltTy->hasFloatingRepresentation() && IsFP)) && \ 3998 EltTySize == ElBits && NumElts == NumEls) \ 3999 return SingletonId; 4000 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 4001 if (EltTy->isBooleanType() && NumElts == NumEls) \ 4002 return SingletonId; 4003 #include "clang/Basic/RISCVVTypes.def" 4004 } 4005 return QualType(); 4006 } 4007 4008 /// getVectorType - Return the unique reference to a vector type of 4009 /// the specified element type and size. VectorType must be a built-in type. 4010 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4011 VectorType::VectorKind VecKind) const { 4012 assert(vecType->isBuiltinType()); 4013 4014 // Check if we've already instantiated a vector of this type. 4015 llvm::FoldingSetNodeID ID; 4016 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4017 4018 void *InsertPos = nullptr; 4019 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4020 return QualType(VTP, 0); 4021 4022 // If the element type isn't canonical, this won't be a canonical type either, 4023 // so fill in the canonical type field. 4024 QualType Canonical; 4025 if (!vecType.isCanonical()) { 4026 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4027 4028 // Get the new insert position for the node we care about. 
4029 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4030 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4031 } 4032 auto *New = new (*this, TypeAlignment) 4033 VectorType(vecType, NumElts, Canonical, VecKind); 4034 VectorTypes.InsertNode(New, InsertPos); 4035 Types.push_back(New); 4036 return QualType(New, 0); 4037 } 4038 4039 QualType 4040 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4041 SourceLocation AttrLoc, 4042 VectorType::VectorKind VecKind) const { 4043 llvm::FoldingSetNodeID ID; 4044 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4045 VecKind); 4046 void *InsertPos = nullptr; 4047 DependentVectorType *Canon = 4048 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4049 DependentVectorType *New; 4050 4051 if (Canon) { 4052 New = new (*this, TypeAlignment) DependentVectorType( 4053 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4054 } else { 4055 QualType CanonVecTy = getCanonicalType(VecType); 4056 if (CanonVecTy == VecType) { 4057 New = new (*this, TypeAlignment) DependentVectorType( 4058 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4059 4060 DependentVectorType *CanonCheck = 4061 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4062 assert(!CanonCheck && 4063 "Dependent-sized vector_size canonical type broken"); 4064 (void)CanonCheck; 4065 DependentVectorTypes.InsertNode(New, InsertPos); 4066 } else { 4067 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4068 SourceLocation(), VecKind); 4069 New = new (*this, TypeAlignment) DependentVectorType( 4070 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4071 } 4072 } 4073 4074 Types.push_back(New); 4075 return QualType(New, 0); 4076 } 4077 4078 /// getExtVectorType - Return the unique reference to an extended vector type of 4079 /// the specified element type and size. VectorType must be a built-in type. 4080 QualType 4081 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { 4082 assert(vecType->isBuiltinType() || vecType->isDependentType()); 4083 4084 // Check if we've already instantiated a vector of this type. 4085 llvm::FoldingSetNodeID ID; 4086 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4087 VectorType::GenericVector); 4088 void *InsertPos = nullptr; 4089 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4090 return QualType(VTP, 0); 4091 4092 // If the element type isn't canonical, this won't be a canonical type either, 4093 // so fill in the canonical type field. 4094 QualType Canonical; 4095 if (!vecType.isCanonical()) { 4096 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4097 4098 // Get the new insert position for the node we care about. 
4099 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4100 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4101 } 4102 auto *New = new (*this, TypeAlignment) 4103 ExtVectorType(vecType, NumElts, Canonical); 4104 VectorTypes.InsertNode(New, InsertPos); 4105 Types.push_back(New); 4106 return QualType(New, 0); 4107 } 4108 4109 QualType 4110 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4111 Expr *SizeExpr, 4112 SourceLocation AttrLoc) const { 4113 llvm::FoldingSetNodeID ID; 4114 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4115 SizeExpr); 4116 4117 void *InsertPos = nullptr; 4118 DependentSizedExtVectorType *Canon 4119 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4120 DependentSizedExtVectorType *New; 4121 if (Canon) { 4122 // We already have a canonical version of this array type; use it as 4123 // the canonical type for a newly-built type. 4124 New = new (*this, TypeAlignment) 4125 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 4126 SizeExpr, AttrLoc); 4127 } else { 4128 QualType CanonVecTy = getCanonicalType(vecType); 4129 if (CanonVecTy == vecType) { 4130 New = new (*this, TypeAlignment) 4131 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 4132 AttrLoc); 4133 4134 DependentSizedExtVectorType *CanonCheck 4135 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4136 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4137 (void)CanonCheck; 4138 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4139 } else { 4140 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4141 SourceLocation()); 4142 New = new (*this, TypeAlignment) DependentSizedExtVectorType( 4143 *this, vecType, CanonExtTy, SizeExpr, AttrLoc); 4144 } 4145 } 4146 4147 Types.push_back(New); 4148 return QualType(New, 0); 4149 } 4150 4151 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4152 unsigned NumColumns) const { 4153 llvm::FoldingSetNodeID ID; 4154 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4155 Type::ConstantMatrix); 4156 4157 assert(MatrixType::isValidElementType(ElementTy) && 4158 "need a valid element type"); 4159 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4160 ConstantMatrixType::isDimensionValid(NumColumns) && 4161 "need valid matrix dimensions"); 4162 void *InsertPos = nullptr; 4163 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4164 return QualType(MTP, 0); 4165 4166 QualType Canonical; 4167 if (!ElementTy.isCanonical()) { 4168 Canonical = 4169 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4170 4171 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4172 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4173 (void)NewIP; 4174 } 4175 4176 auto *New = new (*this, TypeAlignment) 4177 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4178 MatrixTypes.InsertNode(New, InsertPos); 4179 Types.push_back(New); 4180 return QualType(New, 0); 4181 } 4182 4183 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4184 Expr *RowExpr, 4185 Expr *ColumnExpr, 4186 SourceLocation AttrLoc) const { 4187 QualType CanonElementTy = getCanonicalType(ElementTy); 4188 llvm::FoldingSetNodeID ID; 4189 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4190 ColumnExpr); 4191 4192 void *InsertPos = nullptr; 4193 DependentSizedMatrixType 
*Canon =
      DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
        *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // We already have a canonical version of the matrix type.
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for the newly-built type.
  DependentSizedMatrixType *New = new (*this, TypeAlignment)
      DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
      DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
        DependentAddressSpaceType(*this, canonPointeeType,
                                  QualType(), AddrSpaceExpr, AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
                                  AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
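  // Editor's note (illustrative, not part of the original source): this is the
  // node built for a K&R-style declaration such as
  //
  //   int f();   // C before C2x: parameters unspecified, no prototype
  //
  // Only the result type and the ExtInfo (calling convention, noreturn, ...)
  // participate in uniquing, since there are no parameters to profile.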
4278 llvm::FoldingSetNodeID ID; 4279 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4280 4281 void *InsertPos = nullptr; 4282 if (FunctionNoProtoType *FT = 4283 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4284 return QualType(FT, 0); 4285 4286 QualType Canonical; 4287 if (!isCanonicalResultType(ResultTy)) { 4288 Canonical = 4289 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4290 4291 // Get the new insert position for the node we care about. 4292 FunctionNoProtoType *NewIP = 4293 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4294 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4295 } 4296 4297 auto *New = new (*this, TypeAlignment) 4298 FunctionNoProtoType(ResultTy, Canonical, Info); 4299 Types.push_back(New); 4300 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4301 return QualType(New, 0); 4302 } 4303 4304 CanQualType 4305 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4306 CanQualType CanResultType = getCanonicalType(ResultType); 4307 4308 // Canonical result types do not have ARC lifetime qualifiers. 4309 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4310 Qualifiers Qs = CanResultType.getQualifiers(); 4311 Qs.removeObjCLifetime(); 4312 return CanQualType::CreateUnsafe( 4313 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4314 } 4315 4316 return CanResultType; 4317 } 4318 4319 static bool isCanonicalExceptionSpecification( 4320 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4321 if (ESI.Type == EST_None) 4322 return true; 4323 if (!NoexceptInType) 4324 return false; 4325 4326 // C++17 onwards: exception specification is part of the type, as a simple 4327 // boolean "can this function type throw". 4328 if (ESI.Type == EST_BasicNoexcept) 4329 return true; 4330 4331 // A noexcept(expr) specification is (possibly) canonical if expr is 4332 // value-dependent. 4333 if (ESI.Type == EST_DependentNoexcept) 4334 return true; 4335 4336 // A dynamic exception specification is canonical if it only contains pack 4337 // expansions (so we can't tell whether it's non-throwing) and all its 4338 // contained types are canonical. 4339 if (ESI.Type == EST_Dynamic) { 4340 bool AnyPackExpansions = false; 4341 for (QualType ET : ESI.Exceptions) { 4342 if (!ET.isCanonical()) 4343 return false; 4344 if (ET->getAs<PackExpansionType>()) 4345 AnyPackExpansions = true; 4346 } 4347 return AnyPackExpansions; 4348 } 4349 4350 return false; 4351 } 4352 4353 QualType ASTContext::getFunctionTypeInternal( 4354 QualType ResultTy, ArrayRef<QualType> ArgArray, 4355 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4356 size_t NumArgs = ArgArray.size(); 4357 4358 // Unique functions, to guarantee there is only one function of a particular 4359 // structure. 4360 llvm::FoldingSetNodeID ID; 4361 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4362 *this, true); 4363 4364 QualType Canonical; 4365 bool Unique = false; 4366 4367 void *InsertPos = nullptr; 4368 if (FunctionProtoType *FPT = 4369 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4370 QualType Existing = QualType(FPT, 0); 4371 4372 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4373 // it so long as our exception specification doesn't contain a dependent 4374 // noexcept expression, or we're just looking for a canonical type. 4375 // Otherwise, we're going to need to create a type 4376 // sugar node to hold the concrete expression. 
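    // Editor's note (illustrative, not part of the original source): only a
    // computed noexcept(expr) forces a fresh sugar node here; the new node
    // shares the existing canonical type but remembers the exact noexcept
    // expression that was written.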
4377 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4378 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4379 return Existing; 4380 4381 // We need a new type sugar node for this one, to hold the new noexcept 4382 // expression. We do no canonicalization here, but that's OK since we don't 4383 // expect to see the same noexcept expression much more than once. 4384 Canonical = getCanonicalType(Existing); 4385 Unique = true; 4386 } 4387 4388 bool NoexceptInType = getLangOpts().CPlusPlus17; 4389 bool IsCanonicalExceptionSpec = 4390 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4391 4392 // Determine whether the type being created is already canonical or not. 4393 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4394 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4395 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4396 if (!ArgArray[i].isCanonicalAsParam()) 4397 isCanonical = false; 4398 4399 if (OnlyWantCanonical) 4400 assert(isCanonical && 4401 "given non-canonical parameters constructing canonical type"); 4402 4403 // If this type isn't canonical, get the canonical version of it if we don't 4404 // already have it. The exception spec is only partially part of the 4405 // canonical type, and only in C++17 onwards. 4406 if (!isCanonical && Canonical.isNull()) { 4407 SmallVector<QualType, 16> CanonicalArgs; 4408 CanonicalArgs.reserve(NumArgs); 4409 for (unsigned i = 0; i != NumArgs; ++i) 4410 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4411 4412 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4413 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4414 CanonicalEPI.HasTrailingReturn = false; 4415 4416 if (IsCanonicalExceptionSpec) { 4417 // Exception spec is already OK. 4418 } else if (NoexceptInType) { 4419 switch (EPI.ExceptionSpec.Type) { 4420 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4421 // We don't know yet. It shouldn't matter what we pick here; no-one 4422 // should ever look at this. 4423 LLVM_FALLTHROUGH; 4424 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4425 CanonicalEPI.ExceptionSpec.Type = EST_None; 4426 break; 4427 4428 // A dynamic exception specification is almost always "not noexcept", 4429 // with the exception that a pack expansion might expand to no types. 4430 case EST_Dynamic: { 4431 bool AnyPacks = false; 4432 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4433 if (ET->getAs<PackExpansionType>()) 4434 AnyPacks = true; 4435 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4436 } 4437 if (!AnyPacks) 4438 CanonicalEPI.ExceptionSpec.Type = EST_None; 4439 else { 4440 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4441 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4442 } 4443 break; 4444 } 4445 4446 case EST_DynamicNone: 4447 case EST_BasicNoexcept: 4448 case EST_NoexceptTrue: 4449 case EST_NoThrow: 4450 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4451 break; 4452 4453 case EST_DependentNoexcept: 4454 llvm_unreachable("dependent noexcept is already canonical"); 4455 } 4456 } else { 4457 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4458 } 4459 4460 // Adjust the canonical function result type. 4461 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4462 Canonical = 4463 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4464 4465 // Get the new insert position for the node we care about. 
4466 FunctionProtoType *NewIP = 4467 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4468 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4469 } 4470 4471 // Compute the needed size to hold this FunctionProtoType and the 4472 // various trailing objects. 4473 auto ESH = FunctionProtoType::getExceptionSpecSize( 4474 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4475 size_t Size = FunctionProtoType::totalSizeToAlloc< 4476 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4477 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4478 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4479 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4480 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4481 EPI.ExtParameterInfos ? NumArgs : 0, 4482 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4483 4484 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); 4485 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4486 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4487 Types.push_back(FTP); 4488 if (!Unique) 4489 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4490 return QualType(FTP, 0); 4491 } 4492 4493 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4494 llvm::FoldingSetNodeID ID; 4495 PipeType::Profile(ID, T, ReadOnly); 4496 4497 void *InsertPos = nullptr; 4498 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4499 return QualType(PT, 0); 4500 4501 // If the pipe element type isn't canonical, this won't be a canonical type 4502 // either, so fill in the canonical type field. 4503 QualType Canonical; 4504 if (!T.isCanonical()) { 4505 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4506 4507 // Get the new insert position for the node we care about. 4508 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4509 assert(!NewIP && "Shouldn't be in the map!"); 4510 (void)NewIP; 4511 } 4512 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); 4513 Types.push_back(New); 4514 PipeTypes.InsertNode(New, InsertPos); 4515 return QualType(New, 0); 4516 } 4517 4518 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4519 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4520 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4521 : Ty; 4522 } 4523 4524 QualType ASTContext::getReadPipeType(QualType T) const { 4525 return getPipeType(T, true); 4526 } 4527 4528 QualType ASTContext::getWritePipeType(QualType T) const { 4529 return getPipeType(T, false); 4530 } 4531 4532 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4533 llvm::FoldingSetNodeID ID; 4534 BitIntType::Profile(ID, IsUnsigned, NumBits); 4535 4536 void *InsertPos = nullptr; 4537 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4538 return QualType(EIT, 0); 4539 4540 auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits); 4541 BitIntTypes.InsertNode(New, InsertPos); 4542 Types.push_back(New); 4543 return QualType(New, 0); 4544 } 4545 4546 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4547 Expr *NumBitsExpr) const { 4548 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4549 llvm::FoldingSetNodeID ID; 4550 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4551 4552 void *InsertPos = nullptr; 4553 if (DependentBitIntType *Existing = 4554 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4555 return QualType(Existing, 0); 4556 4557 auto *New = new (*this, TypeAlignment) 4558 DependentBitIntType(*this, IsUnsigned, NumBitsExpr); 4559 DependentBitIntTypes.InsertNode(New, InsertPos); 4560 4561 Types.push_back(New); 4562 return QualType(New, 0); 4563 } 4564 4565 #ifndef NDEBUG 4566 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4567 if (!isa<CXXRecordDecl>(D)) return false; 4568 const auto *RD = cast<CXXRecordDecl>(D); 4569 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4570 return true; 4571 if (RD->getDescribedClassTemplate() && 4572 !isa<ClassTemplateSpecializationDecl>(RD)) 4573 return true; 4574 return false; 4575 } 4576 #endif 4577 4578 /// getInjectedClassNameType - Return the unique reference to the 4579 /// injected class name type for the specified templated declaration. 4580 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4581 QualType TST) const { 4582 assert(NeedsInjectedClassNameType(Decl)); 4583 if (Decl->TypeForDecl) { 4584 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4585 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4586 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4587 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4588 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4589 } else { 4590 Type *newType = 4591 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 4592 Decl->TypeForDecl = newType; 4593 Types.push_back(newType); 4594 } 4595 return QualType(Decl->TypeForDecl, 0); 4596 } 4597 4598 /// getTypeDeclType - Return the unique reference to the type for the 4599 /// specified type declaration. 
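// Editor's note (illustrative, not part of the original source): the common
// fast path, ASTContext::getTypeDeclType, is defined inline in the header and
// simply returns Decl->TypeForDecl when it is already cached; the slow path
// below is only reached the first time a declaration's type is built.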
4600 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4601 assert(Decl && "Passed null for Decl param"); 4602 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4603 4604 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4605 return getTypedefType(Typedef); 4606 4607 assert(!isa<TemplateTypeParmDecl>(Decl) && 4608 "Template type parameter types are always available."); 4609 4610 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4611 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4612 assert(!NeedsInjectedClassNameType(Record)); 4613 return getRecordType(Record); 4614 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4615 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4616 return getEnumType(Enum); 4617 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4618 return getUnresolvedUsingType(Using); 4619 } else 4620 llvm_unreachable("TypeDecl without a type?"); 4621 4622 return QualType(Decl->TypeForDecl, 0); 4623 } 4624 4625 /// getTypedefType - Return the unique reference to the type for the 4626 /// specified typedef name decl. 4627 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4628 QualType Underlying) const { 4629 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4630 4631 if (Underlying.isNull()) 4632 Underlying = Decl->getUnderlyingType(); 4633 QualType Canonical = getCanonicalType(Underlying); 4634 auto *newType = new (*this, TypeAlignment) 4635 TypedefType(Type::Typedef, Decl, Underlying, Canonical); 4636 Decl->TypeForDecl = newType; 4637 Types.push_back(newType); 4638 return QualType(newType, 0); 4639 } 4640 4641 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4642 QualType Underlying) const { 4643 llvm::FoldingSetNodeID ID; 4644 UsingType::Profile(ID, Found); 4645 4646 void *InsertPos = nullptr; 4647 UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos); 4648 if (T) 4649 return QualType(T, 0); 4650 4651 assert(!Underlying.hasLocalQualifiers()); 4652 assert(Underlying == getTypeDeclType(cast<TypeDecl>(Found->getTargetDecl()))); 4653 QualType Canon = Underlying.getCanonicalType(); 4654 4655 UsingType *NewType = 4656 new (*this, TypeAlignment) UsingType(Found, Underlying, Canon); 4657 Types.push_back(NewType); 4658 UsingTypes.InsertNode(NewType, InsertPos); 4659 return QualType(NewType, 0); 4660 } 4661 4662 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 4663 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4664 4665 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4666 if (PrevDecl->TypeForDecl) 4667 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4668 4669 auto *newType = new (*this, TypeAlignment) RecordType(Decl); 4670 Decl->TypeForDecl = newType; 4671 Types.push_back(newType); 4672 return QualType(newType, 0); 4673 } 4674 4675 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4676 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4677 4678 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4679 if (PrevDecl->TypeForDecl) 4680 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4681 4682 auto *newType = new (*this, TypeAlignment) EnumType(Decl); 4683 Decl->TypeForDecl = newType; 4684 Types.push_back(newType); 4685 return QualType(newType, 0); 4686 } 4687 4688 QualType ASTContext::getUnresolvedUsingType( 4689 const UnresolvedUsingTypenameDecl *Decl) const { 4690 if (Decl->TypeForDecl) 4691 return 
QualType(Decl->TypeForDecl, 0); 4692 4693 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4694 Decl->getCanonicalDecl()) 4695 if (CanonicalDecl->TypeForDecl) 4696 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4697 4698 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl); 4699 Decl->TypeForDecl = newType; 4700 Types.push_back(newType); 4701 return QualType(newType, 0); 4702 } 4703 4704 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4705 QualType modifiedType, 4706 QualType equivalentType) const { 4707 llvm::FoldingSetNodeID id; 4708 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4709 4710 void *insertPos = nullptr; 4711 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4712 if (type) return QualType(type, 0); 4713 4714 QualType canon = getCanonicalType(equivalentType); 4715 type = new (*this, TypeAlignment) 4716 AttributedType(canon, attrKind, modifiedType, equivalentType); 4717 4718 Types.push_back(type); 4719 AttributedTypes.InsertNode(type, insertPos); 4720 4721 return QualType(type, 0); 4722 } 4723 4724 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4725 QualType Wrapped) { 4726 llvm::FoldingSetNodeID ID; 4727 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4728 4729 void *InsertPos = nullptr; 4730 BTFTagAttributedType *Ty = 4731 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4732 if (Ty) 4733 return QualType(Ty, 0); 4734 4735 QualType Canon = getCanonicalType(Wrapped); 4736 Ty = new (*this, TypeAlignment) BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4737 4738 Types.push_back(Ty); 4739 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4740 4741 return QualType(Ty, 0); 4742 } 4743 4744 /// Retrieve a substitution-result type. 
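/// The returned node is sugar over the (already canonical) replacement type
/// that additionally records which template type parameter was substituted,
/// keeping diagnostics and type printing close to the written source.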
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
                                         QualType Replacement) const {
  assert(Replacement.isCanonical()
         && "replacement types must always be canonical");

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm
    = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    SubstParm = new (*this, TypeAlignment)
        SubstTemplateTypeParmType(Parm, Replacement);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}

/// Retrieve a substitution-result type for a template type parameter pack
/// that has been replaced by the given template argument pack but not yet
/// expanded.
QualType ASTContext::getSubstTemplateTypeParmPackType(
                                          const TemplateTypeParmType *Parm,
                                          const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements()) {
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
    assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
  }
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm
        = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  if (!Parm->isCanonicalUnqualified()) {
    Canon = getCanonicalType(QualType(Parm, 0));
    Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
                                             ArgPack);
    SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  auto *SubstParm
    = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
                                                               ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}

/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
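/// For example, in 'template <typename T> void f(T);' the parameter T is the
/// template type parameter type at depth 0, index 0. When a declaration is
/// supplied, the result is a sugared node whose canonical type is the
/// corresponding declaration-less depth/index form.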
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);

    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}

TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                              const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          const TemplateArgumentListInfo &Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args.arguments())
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}

#ifndef NDEBUG
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  return false;
}
#endif

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = QTN->getUnderlyingTemplate();

  bool IsTypeAlias =
      isa_and_nonnull<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
4898 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4899 "Caller must compute aliased type"); 4900 IsTypeAlias = false; 4901 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4902 } 4903 4904 // Allocate the (non-canonical) template specialization type, but don't 4905 // try to unique it: these types typically have location information that 4906 // we don't unique and don't want to lose. 4907 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4908 sizeof(TemplateArgument) * Args.size() + 4909 (IsTypeAlias? sizeof(QualType) : 0), 4910 TypeAlignment); 4911 auto *Spec 4912 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4913 IsTypeAlias ? Underlying : QualType()); 4914 4915 Types.push_back(Spec); 4916 return QualType(Spec, 0); 4917 } 4918 4919 static bool 4920 getCanonicalTemplateArguments(const ASTContext &C, 4921 ArrayRef<TemplateArgument> OrigArgs, 4922 SmallVectorImpl<TemplateArgument> &CanonArgs) { 4923 bool AnyNonCanonArgs = false; 4924 unsigned NumArgs = OrigArgs.size(); 4925 CanonArgs.resize(NumArgs); 4926 for (unsigned I = 0; I != NumArgs; ++I) { 4927 const TemplateArgument &OrigArg = OrigArgs[I]; 4928 TemplateArgument &CanonArg = CanonArgs[I]; 4929 CanonArg = C.getCanonicalTemplateArgument(OrigArg); 4930 if (!CanonArg.structurallyEquals(OrigArg)) 4931 AnyNonCanonArgs = true; 4932 } 4933 return AnyNonCanonArgs; 4934 } 4935 4936 QualType ASTContext::getCanonicalTemplateSpecializationType( 4937 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4938 assert(!Template.getAsDependentTemplateName() && 4939 "No dependent template names here!"); 4940 4941 // Look through qualified template names. 4942 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4943 Template = TemplateName(QTN->getUnderlyingTemplate()); 4944 4945 // Build the canonical template specialization type. 4946 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4947 SmallVector<TemplateArgument, 4> CanonArgs; 4948 ::getCanonicalTemplateArguments(*this, Args, CanonArgs); 4949 4950 // Determine whether this canonical template specialization type already 4951 // exists. 4952 llvm::FoldingSetNodeID ID; 4953 TemplateSpecializationType::Profile(ID, CanonTemplate, 4954 CanonArgs, *this); 4955 4956 void *InsertPos = nullptr; 4957 TemplateSpecializationType *Spec 4958 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4959 4960 if (!Spec) { 4961 // Allocate a new canonical template specialization type. 
4962 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4963 sizeof(TemplateArgument) * CanonArgs.size()), 4964 TypeAlignment); 4965 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 4966 CanonArgs, 4967 QualType(), QualType()); 4968 Types.push_back(Spec); 4969 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 4970 } 4971 4972 assert(Spec->isDependentType() && 4973 "Non-dependent template-id type must have a canonical type"); 4974 return QualType(Spec, 0); 4975 } 4976 4977 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 4978 NestedNameSpecifier *NNS, 4979 QualType NamedType, 4980 TagDecl *OwnedTagDecl) const { 4981 llvm::FoldingSetNodeID ID; 4982 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 4983 4984 void *InsertPos = nullptr; 4985 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4986 if (T) 4987 return QualType(T, 0); 4988 4989 QualType Canon = NamedType; 4990 if (!Canon.isCanonical()) { 4991 Canon = getCanonicalType(NamedType); 4992 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4993 assert(!CheckT && "Elaborated canonical type broken"); 4994 (void)CheckT; 4995 } 4996 4997 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 4998 TypeAlignment); 4999 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5000 5001 Types.push_back(T); 5002 ElaboratedTypes.InsertNode(T, InsertPos); 5003 return QualType(T, 0); 5004 } 5005 5006 QualType 5007 ASTContext::getParenType(QualType InnerType) const { 5008 llvm::FoldingSetNodeID ID; 5009 ParenType::Profile(ID, InnerType); 5010 5011 void *InsertPos = nullptr; 5012 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5013 if (T) 5014 return QualType(T, 0); 5015 5016 QualType Canon = InnerType; 5017 if (!Canon.isCanonical()) { 5018 Canon = getCanonicalType(InnerType); 5019 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5020 assert(!CheckT && "Paren canonical type broken"); 5021 (void)CheckT; 5022 } 5023 5024 T = new (*this, TypeAlignment) ParenType(InnerType, Canon); 5025 Types.push_back(T); 5026 ParenTypes.InsertNode(T, InsertPos); 5027 return QualType(T, 0); 5028 } 5029 5030 QualType 5031 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5032 const IdentifierInfo *MacroII) const { 5033 QualType Canon = UnderlyingTy; 5034 if (!Canon.isCanonical()) 5035 Canon = getCanonicalType(UnderlyingTy); 5036 5037 auto *newType = new (*this, TypeAlignment) 5038 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5039 Types.push_back(newType); 5040 return QualType(newType, 0); 5041 } 5042 5043 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5044 NestedNameSpecifier *NNS, 5045 const IdentifierInfo *Name, 5046 QualType Canon) const { 5047 if (Canon.isNull()) { 5048 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5049 if (CanonNNS != NNS) 5050 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5051 } 5052 5053 llvm::FoldingSetNodeID ID; 5054 DependentNameType::Profile(ID, Keyword, NNS, Name); 5055 5056 void *InsertPos = nullptr; 5057 DependentNameType *T 5058 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5059 if (T) 5060 return QualType(T, 0); 5061 5062 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); 5063 Types.push_back(T); 5064 DependentNameTypes.InsertNode(T, InsertPos); 5065 return QualType(T, 0); 5066 } 5067 5068 QualType 5069 
ASTContext::getDependentTemplateSpecializationType( 5070 ElaboratedTypeKeyword Keyword, 5071 NestedNameSpecifier *NNS, 5072 const IdentifierInfo *Name, 5073 const TemplateArgumentListInfo &Args) const { 5074 // TODO: avoid this copy 5075 SmallVector<TemplateArgument, 16> ArgCopy; 5076 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5077 ArgCopy.push_back(Args[I].getArgument()); 5078 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5079 } 5080 5081 QualType 5082 ASTContext::getDependentTemplateSpecializationType( 5083 ElaboratedTypeKeyword Keyword, 5084 NestedNameSpecifier *NNS, 5085 const IdentifierInfo *Name, 5086 ArrayRef<TemplateArgument> Args) const { 5087 assert((!NNS || NNS->isDependent()) && 5088 "nested-name-specifier must be dependent"); 5089 5090 llvm::FoldingSetNodeID ID; 5091 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5092 Name, Args); 5093 5094 void *InsertPos = nullptr; 5095 DependentTemplateSpecializationType *T 5096 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5097 if (T) 5098 return QualType(T, 0); 5099 5100 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5101 5102 ElaboratedTypeKeyword CanonKeyword = Keyword; 5103 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 5104 5105 SmallVector<TemplateArgument, 16> CanonArgs; 5106 bool AnyNonCanonArgs = 5107 ::getCanonicalTemplateArguments(*this, Args, CanonArgs); 5108 5109 QualType Canon; 5110 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5111 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5112 Name, 5113 CanonArgs); 5114 5115 // Find the insert position again. 5116 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5117 } 5118 5119 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5120 sizeof(TemplateArgument) * Args.size()), 5121 TypeAlignment); 5122 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5123 Name, Args, Canon); 5124 Types.push_back(T); 5125 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5126 return QualType(T, 0); 5127 } 5128 5129 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5130 TemplateArgument Arg; 5131 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5132 QualType ArgType = getTypeDeclType(TTP); 5133 if (TTP->isParameterPack()) 5134 ArgType = getPackExpansionType(ArgType, None); 5135 5136 Arg = TemplateArgument(ArgType); 5137 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5138 QualType T = 5139 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5140 // For class NTTPs, ensure we include the 'const' so the type matches that 5141 // of a real template argument. 5142 // FIXME: It would be more faithful to model this as something like an 5143 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
5144 if (T->isRecordType()) 5145 T.addConst(); 5146 Expr *E = new (*this) DeclRefExpr( 5147 *this, NTTP, /*enclosing*/ false, T, 5148 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5149 5150 if (NTTP->isParameterPack()) 5151 E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(), 5152 None); 5153 Arg = TemplateArgument(E); 5154 } else { 5155 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5156 if (TTP->isParameterPack()) 5157 Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>()); 5158 else 5159 Arg = TemplateArgument(TemplateName(TTP)); 5160 } 5161 5162 if (Param->isTemplateParameterPack()) 5163 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5164 5165 return Arg; 5166 } 5167 5168 void 5169 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5170 SmallVectorImpl<TemplateArgument> &Args) { 5171 Args.reserve(Args.size() + Params->size()); 5172 5173 for (NamedDecl *Param : *Params) 5174 Args.push_back(getInjectedTemplateArg(Param)); 5175 } 5176 5177 QualType ASTContext::getPackExpansionType(QualType Pattern, 5178 Optional<unsigned> NumExpansions, 5179 bool ExpectPackInType) { 5180 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5181 "Pack expansions must expand one or more parameter packs"); 5182 5183 llvm::FoldingSetNodeID ID; 5184 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5185 5186 void *InsertPos = nullptr; 5187 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5188 if (T) 5189 return QualType(T, 0); 5190 5191 QualType Canon; 5192 if (!Pattern.isCanonical()) { 5193 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5194 /*ExpectPackInType=*/false); 5195 5196 // Find the insert position again, in case we inserted an element into 5197 // PackExpansionTypes and invalidated our insert position. 5198 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5199 } 5200 5201 T = new (*this, TypeAlignment) 5202 PackExpansionType(Pattern, Canon, NumExpansions); 5203 Types.push_back(T); 5204 PackExpansionTypes.InsertNode(T, InsertPos); 5205 return QualType(T, 0); 5206 } 5207 5208 /// CmpProtocolNames - Comparison predicate for sorting protocols 5209 /// alphabetically. 5210 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5211 ObjCProtocolDecl *const *RHS) { 5212 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5213 } 5214 5215 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5216 if (Protocols.empty()) return true; 5217 5218 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5219 return false; 5220 5221 for (unsigned i = 1; i != Protocols.size(); ++i) 5222 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5223 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5224 return false; 5225 return true; 5226 } 5227 5228 static void 5229 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5230 // Sort protocols, keyed by name. 5231 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5232 5233 // Canonicalize. 5234 for (ObjCProtocolDecl *&P : Protocols) 5235 P = P->getCanonicalDecl(); 5236 5237 // Remove duplicates. 
5238 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5239 Protocols.erase(ProtocolsEnd, Protocols.end()); 5240 } 5241 5242 QualType ASTContext::getObjCObjectType(QualType BaseType, 5243 ObjCProtocolDecl * const *Protocols, 5244 unsigned NumProtocols) const { 5245 return getObjCObjectType(BaseType, {}, 5246 llvm::makeArrayRef(Protocols, NumProtocols), 5247 /*isKindOf=*/false); 5248 } 5249 5250 QualType ASTContext::getObjCObjectType( 5251 QualType baseType, 5252 ArrayRef<QualType> typeArgs, 5253 ArrayRef<ObjCProtocolDecl *> protocols, 5254 bool isKindOf) const { 5255 // If the base type is an interface and there aren't any protocols or 5256 // type arguments to add, then the interface type will do just fine. 5257 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5258 isa<ObjCInterfaceType>(baseType)) 5259 return baseType; 5260 5261 // Look in the folding set for an existing type. 5262 llvm::FoldingSetNodeID ID; 5263 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5264 void *InsertPos = nullptr; 5265 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5266 return QualType(QT, 0); 5267 5268 // Determine the type arguments to be used for canonicalization, 5269 // which may be explicitly specified here or written on the base 5270 // type. 5271 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5272 if (effectiveTypeArgs.empty()) { 5273 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5274 effectiveTypeArgs = baseObject->getTypeArgs(); 5275 } 5276 5277 // Build the canonical type, which has the canonical base type and a 5278 // sorted-and-uniqued list of protocols and the type arguments 5279 // canonicalized. 5280 QualType canonical; 5281 bool typeArgsAreCanonical = llvm::all_of( 5282 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5283 bool protocolsSorted = areSortedAndUniqued(protocols); 5284 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5285 // Determine the canonical type arguments. 5286 ArrayRef<QualType> canonTypeArgs; 5287 SmallVector<QualType, 4> canonTypeArgsVec; 5288 if (!typeArgsAreCanonical) { 5289 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5290 for (auto typeArg : effectiveTypeArgs) 5291 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5292 canonTypeArgs = canonTypeArgsVec; 5293 } else { 5294 canonTypeArgs = effectiveTypeArgs; 5295 } 5296 5297 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5298 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5299 if (!protocolsSorted) { 5300 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5301 SortAndUniqueProtocols(canonProtocolsVec); 5302 canonProtocols = canonProtocolsVec; 5303 } else { 5304 canonProtocols = protocols; 5305 } 5306 5307 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5308 canonProtocols, isKindOf); 5309 5310 // Regenerate InsertPos. 5311 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5312 } 5313 5314 unsigned size = sizeof(ObjCObjectTypeImpl); 5315 size += typeArgs.size() * sizeof(QualType); 5316 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5317 void *mem = Allocate(size, TypeAlignment); 5318 auto *T = 5319 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5320 isKindOf); 5321 5322 Types.push_back(T); 5323 ObjCObjectTypes.InsertNode(T, InsertPos); 5324 return QualType(T, 0); 5325 } 5326 5327 /// Apply Objective-C protocol qualifiers to the given type. 
5328 /// If this is for the canonical type of a type parameter, we can apply 5329 /// protocol qualifiers on the ObjCObjectPointerType. 5330 QualType 5331 ASTContext::applyObjCProtocolQualifiers(QualType type, 5332 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5333 bool allowOnPointerType) const { 5334 hasError = false; 5335 5336 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5337 return getObjCTypeParamType(objT->getDecl(), protocols); 5338 } 5339 5340 // Apply protocol qualifiers to ObjCObjectPointerType. 5341 if (allowOnPointerType) { 5342 if (const auto *objPtr = 5343 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5344 const ObjCObjectType *objT = objPtr->getObjectType(); 5345 // Merge protocol lists and construct ObjCObjectType. 5346 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5347 protocolsVec.append(objT->qual_begin(), 5348 objT->qual_end()); 5349 protocolsVec.append(protocols.begin(), protocols.end()); 5350 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5351 type = getObjCObjectType( 5352 objT->getBaseType(), 5353 objT->getTypeArgsAsWritten(), 5354 protocols, 5355 objT->isKindOfTypeAsWritten()); 5356 return getObjCObjectPointerType(type); 5357 } 5358 } 5359 5360 // Apply protocol qualifiers to ObjCObjectType. 5361 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5362 // FIXME: Check for protocols to which the class type is already 5363 // known to conform. 5364 5365 return getObjCObjectType(objT->getBaseType(), 5366 objT->getTypeArgsAsWritten(), 5367 protocols, 5368 objT->isKindOfTypeAsWritten()); 5369 } 5370 5371 // If the canonical type is ObjCObjectType, ... 5372 if (type->isObjCObjectType()) { 5373 // Silently overwrite any existing protocol qualifiers. 5374 // TODO: determine whether that's the right thing to do. 5375 5376 // FIXME: Check for protocols to which the class type is already 5377 // known to conform. 5378 return getObjCObjectType(type, {}, protocols, false); 5379 } 5380 5381 // id<protocol-list> 5382 if (type->isObjCIdType()) { 5383 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5384 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5385 objPtr->isKindOfType()); 5386 return getObjCObjectPointerType(type); 5387 } 5388 5389 // Class<protocol-list> 5390 if (type->isObjCClassType()) { 5391 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5392 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5393 objPtr->isKindOfType()); 5394 return getObjCObjectPointerType(type); 5395 } 5396 5397 hasError = true; 5398 return type; 5399 } 5400 5401 QualType 5402 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5403 ArrayRef<ObjCProtocolDecl *> protocols) const { 5404 // Look in the folding set for an existing type. 5405 llvm::FoldingSetNodeID ID; 5406 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5407 void *InsertPos = nullptr; 5408 if (ObjCTypeParamType *TypeParam = 5409 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5410 return QualType(TypeParam, 0); 5411 5412 // We canonicalize to the underlying type. 5413 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5414 if (!protocols.empty()) { 5415 // Apply the protocol qualifers. 
5416 bool hasError; 5417 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5418 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5419 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5420 } 5421 5422 unsigned size = sizeof(ObjCTypeParamType); 5423 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5424 void *mem = Allocate(size, TypeAlignment); 5425 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5426 5427 Types.push_back(newType); 5428 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5429 return QualType(newType, 0); 5430 } 5431 5432 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5433 ObjCTypeParamDecl *New) const { 5434 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5435 // Update TypeForDecl after updating TypeSourceInfo. 5436 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5437 SmallVector<ObjCProtocolDecl *, 8> protocols; 5438 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5439 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5440 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5441 } 5442 5443 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5444 /// protocol list adopt all protocols in QT's qualified-id protocol 5445 /// list. 5446 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5447 ObjCInterfaceDecl *IC) { 5448 if (!QT->isObjCQualifiedIdType()) 5449 return false; 5450 5451 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5452 // If both the right and left sides have qualifiers. 5453 for (auto *Proto : OPT->quals()) { 5454 if (!IC->ClassImplementsProtocol(Proto, false)) 5455 return false; 5456 } 5457 return true; 5458 } 5459 return false; 5460 } 5461 5462 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5463 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5464 /// of protocols. 5465 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5466 ObjCInterfaceDecl *IDecl) { 5467 if (!QT->isObjCQualifiedIdType()) 5468 return false; 5469 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5470 if (!OPT) 5471 return false; 5472 if (!IDecl->hasDefinition()) 5473 return false; 5474 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5475 CollectInheritedProtocols(IDecl, InheritedProtocols); 5476 if (InheritedProtocols.empty()) 5477 return false; 5478 // Check that if every protocol in list of id<plist> conforms to a protocol 5479 // of IDecl's, then bridge casting is ok. 5480 bool Conforms = false; 5481 for (auto *Proto : OPT->quals()) { 5482 Conforms = false; 5483 for (auto *PI : InheritedProtocols) { 5484 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5485 Conforms = true; 5486 break; 5487 } 5488 } 5489 if (!Conforms) 5490 break; 5491 } 5492 if (Conforms) 5493 return true; 5494 5495 for (auto *PI : InheritedProtocols) { 5496 // If both the right and left sides have qualifiers. 5497 bool Adopts = false; 5498 for (auto *Proto : OPT->quals()) { 5499 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5500 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5501 break; 5502 } 5503 if (!Adopts) 5504 return false; 5505 } 5506 return true; 5507 } 5508 5509 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5510 /// the given object type. 
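/// The result is uniqued in the ObjCObjectPointerTypes folding set, keyed on
/// the object type, so repeated requests for the same object type return the
/// same node.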
5511 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5512 llvm::FoldingSetNodeID ID; 5513 ObjCObjectPointerType::Profile(ID, ObjectT); 5514 5515 void *InsertPos = nullptr; 5516 if (ObjCObjectPointerType *QT = 5517 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5518 return QualType(QT, 0); 5519 5520 // Find the canonical object type. 5521 QualType Canonical; 5522 if (!ObjectT.isCanonical()) { 5523 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5524 5525 // Regenerate InsertPos. 5526 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5527 } 5528 5529 // No match. 5530 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); 5531 auto *QType = 5532 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5533 5534 Types.push_back(QType); 5535 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5536 return QualType(QType, 0); 5537 } 5538 5539 /// getObjCInterfaceType - Return the unique reference to the type for the 5540 /// specified ObjC interface decl. The list of protocols is optional. 5541 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5542 ObjCInterfaceDecl *PrevDecl) const { 5543 if (Decl->TypeForDecl) 5544 return QualType(Decl->TypeForDecl, 0); 5545 5546 if (PrevDecl) { 5547 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5548 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5549 return QualType(PrevDecl->TypeForDecl, 0); 5550 } 5551 5552 // Prefer the definition, if there is one. 5553 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5554 Decl = Def; 5555 5556 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); 5557 auto *T = new (Mem) ObjCInterfaceType(Decl); 5558 Decl->TypeForDecl = T; 5559 Types.push_back(T); 5560 return QualType(T, 0); 5561 } 5562 5563 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5564 /// TypeOfExprType AST's (since expression's are never shared). For example, 5565 /// multiple declarations that refer to "typeof(x)" all contain different 5566 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5567 /// on canonical type's (which are always unique). 5568 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { 5569 TypeOfExprType *toe; 5570 if (tofExpr->isTypeDependent()) { 5571 llvm::FoldingSetNodeID ID; 5572 DependentTypeOfExprType::Profile(ID, *this, tofExpr); 5573 5574 void *InsertPos = nullptr; 5575 DependentTypeOfExprType *Canon 5576 = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5577 if (Canon) { 5578 // We already have a "canonical" version of an identical, dependent 5579 // typeof(expr) type. Use that as our canonical type. 5580 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, 5581 QualType((TypeOfExprType*)Canon, 0)); 5582 } else { 5583 // Build a new, canonical typeof(expr) type. 5584 Canon 5585 = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); 5586 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5587 toe = Canon; 5588 } 5589 } else { 5590 QualType Canonical = getCanonicalType(tofExpr->getType()); 5591 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); 5592 } 5593 Types.push_back(toe); 5594 return QualType(toe, 0); 5595 } 5596 5597 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5598 /// TypeOfType nodes. The only motivation to unique these nodes would be 5599 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5600 /// an issue. 
This doesn't affect the type checker, since it operates 5601 /// on canonical types (which are always unique). 5602 QualType ASTContext::getTypeOfType(QualType tofType) const { 5603 QualType Canonical = getCanonicalType(tofType); 5604 auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); 5605 Types.push_back(tot); 5606 return QualType(tot, 0); 5607 } 5608 5609 /// getReferenceQualifiedType - Given an expr, will return the type for 5610 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5611 /// and class member access into account. 5612 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5613 // C++11 [dcl.type.simple]p4: 5614 // [...] 5615 QualType T = E->getType(); 5616 switch (E->getValueKind()) { 5617 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5618 // type of e; 5619 case VK_XValue: 5620 return getRValueReferenceType(T); 5621 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5622 // type of e; 5623 case VK_LValue: 5624 return getLValueReferenceType(T); 5625 // - otherwise, decltype(e) is the type of e. 5626 case VK_PRValue: 5627 return T; 5628 } 5629 llvm_unreachable("Unknown value kind"); 5630 } 5631 5632 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5633 /// nodes. This would never be helpful, since each such type has its own 5634 /// expression, and would not give a significant memory saving, since there 5635 /// is an Expr tree under each such type. 5636 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5637 DecltypeType *dt; 5638 5639 // C++11 [temp.type]p2: 5640 // If an expression e involves a template parameter, decltype(e) denotes a 5641 // unique dependent type. Two such decltype-specifiers refer to the same 5642 // type only if their expressions are equivalent (14.5.6.1). 5643 if (e->isInstantiationDependent()) { 5644 llvm::FoldingSetNodeID ID; 5645 DependentDecltypeType::Profile(ID, *this, e); 5646 5647 void *InsertPos = nullptr; 5648 DependentDecltypeType *Canon 5649 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5650 if (!Canon) { 5651 // Build a new, canonical decltype(expr) type. 5652 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); 5653 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5654 } 5655 dt = new (*this, TypeAlignment) 5656 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5657 } else { 5658 dt = new (*this, TypeAlignment) 5659 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5660 } 5661 Types.push_back(dt); 5662 return QualType(dt, 0); 5663 } 5664 5665 /// getUnaryTransformationType - We don't unique these, since the memory 5666 /// savings are minimal and these are rare. 5667 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5668 QualType UnderlyingType, 5669 UnaryTransformType::UTTKind Kind) 5670 const { 5671 UnaryTransformType *ut = nullptr; 5672 5673 if (BaseType->isDependentType()) { 5674 // Look in the folding set for an existing type. 5675 llvm::FoldingSetNodeID ID; 5676 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5677 5678 void *InsertPos = nullptr; 5679 DependentUnaryTransformType *Canon 5680 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5681 5682 if (!Canon) { 5683 // Build a new, canonical __underlying_type(type) type. 
5684 Canon = new (*this, TypeAlignment) 5685 DependentUnaryTransformType(*this, getCanonicalType(BaseType), 5686 Kind); 5687 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5688 } 5689 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5690 QualType(), Kind, 5691 QualType(Canon, 0)); 5692 } else { 5693 QualType CanonType = getCanonicalType(UnderlyingType); 5694 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5695 UnderlyingType, Kind, 5696 CanonType); 5697 } 5698 Types.push_back(ut); 5699 return QualType(ut, 0); 5700 } 5701 5702 QualType ASTContext::getAutoTypeInternal( 5703 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5704 bool IsPack, ConceptDecl *TypeConstraintConcept, 5705 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5706 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5707 !TypeConstraintConcept && !IsDependent) 5708 return getAutoDeductType(); 5709 5710 if (TypeConstraintConcept) 5711 TypeConstraintConcept = TypeConstraintConcept->getCanonicalDecl(); 5712 5713 // Look in the folding set for an existing type. 5714 void *InsertPos = nullptr; 5715 llvm::FoldingSetNodeID ID; 5716 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5717 TypeConstraintConcept, TypeConstraintArgs); 5718 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5719 return QualType(AT, 0); 5720 5721 QualType Canon; 5722 if (!IsCanon) { 5723 if (DeducedType.isNull()) { 5724 SmallVector<TemplateArgument, 4> CanonArgs; 5725 bool AnyNonCanonArgs = 5726 ::getCanonicalTemplateArguments(*this, TypeConstraintArgs, CanonArgs); 5727 if (AnyNonCanonArgs) { 5728 Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5729 TypeConstraintConcept, CanonArgs, true); 5730 // Find the insert position again. 5731 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5732 } 5733 } else { 5734 Canon = DeducedType.getCanonicalType(); 5735 } 5736 } 5737 5738 void *Mem = Allocate(sizeof(AutoType) + 5739 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5740 TypeAlignment); 5741 auto *AT = new (Mem) AutoType( 5742 DeducedType, Keyword, 5743 (IsDependent ? TypeDependence::DependentInstantiation 5744 : TypeDependence::None) | 5745 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5746 Canon, TypeConstraintConcept, TypeConstraintArgs); 5747 Types.push_back(AT); 5748 AutoTypes.InsertNode(AT, InsertPos); 5749 return QualType(AT, 0); 5750 } 5751 5752 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5753 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5754 /// canonical deduced-but-dependent 'auto' type. 5755 QualType 5756 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5757 bool IsDependent, bool IsPack, 5758 ConceptDecl *TypeConstraintConcept, 5759 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5760 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5761 assert((!IsDependent || DeducedType.isNull()) && 5762 "A dependent auto should be undeduced"); 5763 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5764 TypeConstraintConcept, TypeConstraintArgs); 5765 } 5766 5767 /// Return the uniqued reference to the deduced template specialization type 5768 /// which has been deduced to the given type, or to the canonical undeduced 5769 /// such type, or the canonical deduced-but-dependent such type. 
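/// Deduced template specialization types arise from C++17 class template
/// argument deduction, e.g. the declared type of 'p' in 'std::pair p(1, 2);'
/// before deduction resolves it to a concrete specialization.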
5770 QualType ASTContext::getDeducedTemplateSpecializationType( 5771 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5772 // Look in the folding set for an existing type. 5773 void *InsertPos = nullptr; 5774 llvm::FoldingSetNodeID ID; 5775 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5776 IsDependent); 5777 if (DeducedTemplateSpecializationType *DTST = 5778 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5779 return QualType(DTST, 0); 5780 5781 auto *DTST = new (*this, TypeAlignment) 5782 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5783 llvm::FoldingSetNodeID TempID; 5784 DTST->Profile(TempID); 5785 assert(ID == TempID && "ID does not match"); 5786 Types.push_back(DTST); 5787 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5788 return QualType(DTST, 0); 5789 } 5790 5791 /// getAtomicType - Return the uniqued reference to the atomic type for 5792 /// the given value type. 5793 QualType ASTContext::getAtomicType(QualType T) const { 5794 // Unique pointers, to guarantee there is only one pointer of a particular 5795 // structure. 5796 llvm::FoldingSetNodeID ID; 5797 AtomicType::Profile(ID, T); 5798 5799 void *InsertPos = nullptr; 5800 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5801 return QualType(AT, 0); 5802 5803 // If the atomic value type isn't canonical, this won't be a canonical type 5804 // either, so fill in the canonical type field. 5805 QualType Canonical; 5806 if (!T.isCanonical()) { 5807 Canonical = getAtomicType(getCanonicalType(T)); 5808 5809 // Get the new insert position for the node we care about. 5810 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5811 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5812 } 5813 auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 5814 Types.push_back(New); 5815 AtomicTypes.InsertNode(New, InsertPos); 5816 return QualType(New, 0); 5817 } 5818 5819 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5820 QualType ASTContext::getAutoDeductType() const { 5821 if (AutoDeductTy.isNull()) 5822 AutoDeductTy = QualType(new (*this, TypeAlignment) 5823 AutoType(QualType(), AutoTypeKeyword::Auto, 5824 TypeDependence::None, QualType(), 5825 /*concept*/ nullptr, /*args*/ {}), 5826 0); 5827 return AutoDeductTy; 5828 } 5829 5830 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5831 QualType ASTContext::getAutoRRefDeductType() const { 5832 if (AutoRRefDeductTy.isNull()) 5833 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5834 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5835 return AutoRRefDeductTy; 5836 } 5837 5838 /// getTagDeclType - Return the unique reference to the type for the 5839 /// specified TagDecl (struct/union/class/enum) decl. 5840 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5841 assert(Decl); 5842 // FIXME: What is the design on getTagDeclType when it requires casting 5843 // away const? mutable? 5844 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5845 } 5846 5847 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5848 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5849 /// needs to agree with the definition in <stddef.h>. 
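/// The underlying integer type is whatever TargetInfo::getSizeType() reports,
/// so its width varies by target (commonly 'unsigned long' or
/// 'unsigned long long').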
5850 CanQualType ASTContext::getSizeType() const { 5851 return getFromTargetType(Target->getSizeType()); 5852 } 5853 5854 /// Return the unique signed counterpart of the integer type 5855 /// corresponding to size_t. 5856 CanQualType ASTContext::getSignedSizeType() const { 5857 return getFromTargetType(Target->getSignedSizeType()); 5858 } 5859 5860 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5861 CanQualType ASTContext::getIntMaxType() const { 5862 return getFromTargetType(Target->getIntMaxType()); 5863 } 5864 5865 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5866 CanQualType ASTContext::getUIntMaxType() const { 5867 return getFromTargetType(Target->getUIntMaxType()); 5868 } 5869 5870 /// getSignedWCharType - Return the type of "signed wchar_t". 5871 /// Used when in C++, as a GCC extension. 5872 QualType ASTContext::getSignedWCharType() const { 5873 // FIXME: derive from "Target" ? 5874 return WCharTy; 5875 } 5876 5877 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5878 /// Used when in C++, as a GCC extension. 5879 QualType ASTContext::getUnsignedWCharType() const { 5880 // FIXME: derive from "Target" ? 5881 return UnsignedIntTy; 5882 } 5883 5884 QualType ASTContext::getIntPtrType() const { 5885 return getFromTargetType(Target->getIntPtrType()); 5886 } 5887 5888 QualType ASTContext::getUIntPtrType() const { 5889 return getCorrespondingUnsignedType(getIntPtrType()); 5890 } 5891 5892 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5893 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 5894 QualType ASTContext::getPointerDiffType() const { 5895 return getFromTargetType(Target->getPtrDiffType(0)); 5896 } 5897 5898 /// Return the unique unsigned counterpart of "ptrdiff_t" 5899 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5900 /// in the definition of %tu format specifier. 5901 QualType ASTContext::getUnsignedPointerDiffType() const { 5902 return getFromTargetType(Target->getUnsignedPtrDiffType(0)); 5903 } 5904 5905 /// Return the unique type for "pid_t" defined in 5906 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5907 QualType ASTContext::getProcessIDType() const { 5908 return getFromTargetType(Target->getProcessIDType()); 5909 } 5910 5911 //===----------------------------------------------------------------------===// 5912 // Type Operators 5913 //===----------------------------------------------------------------------===// 5914 5915 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5916 // Push qualifiers into arrays, and then discard any remaining 5917 // qualifiers. 5918 T = getCanonicalType(T); 5919 T = getVariableArrayDecayedType(T); 5920 const Type *Ty = T.getTypePtr(); 5921 QualType Result; 5922 if (isa<ArrayType>(Ty)) { 5923 Result = getArrayDecayedType(QualType(Ty,0)); 5924 } else if (isa<FunctionType>(Ty)) { 5925 Result = getPointerType(QualType(Ty, 0)); 5926 } else { 5927 Result = QualType(Ty, 0); 5928 } 5929 5930 return CanQualType::CreateUnsafe(Result); 5931 } 5932 5933 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5934 Qualifiers &quals) { 5935 SplitQualType splitType = type.getSplitUnqualifiedType(); 5936 5937 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5938 // the unqualified desugared type and then drops it on the floor. 5939 // We then have to strip that sugar back off with 5940 // getUnqualifiedDesugaredType(), which is silly. 
5941 const auto *AT = 5942 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5943 5944 // If we don't have an array, just use the results in splitType. 5945 if (!AT) { 5946 quals = splitType.Quals; 5947 return QualType(splitType.Ty, 0); 5948 } 5949 5950 // Otherwise, recurse on the array's element type. 5951 QualType elementType = AT->getElementType(); 5952 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 5953 5954 // If that didn't change the element type, AT has no qualifiers, so we 5955 // can just use the results in splitType. 5956 if (elementType == unqualElementType) { 5957 assert(quals.empty()); // from the recursive call 5958 quals = splitType.Quals; 5959 return QualType(splitType.Ty, 0); 5960 } 5961 5962 // Otherwise, add in the qualifiers from the outermost type, then 5963 // build the type back up. 5964 quals.addConsistentQualifiers(splitType.Quals); 5965 5966 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 5967 return getConstantArrayType(unqualElementType, CAT->getSize(), 5968 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 5969 } 5970 5971 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 5972 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 5973 } 5974 5975 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 5976 return getVariableArrayType(unqualElementType, 5977 VAT->getSizeExpr(), 5978 VAT->getSizeModifier(), 5979 VAT->getIndexTypeCVRQualifiers(), 5980 VAT->getBracketsRange()); 5981 } 5982 5983 const auto *DSAT = cast<DependentSizedArrayType>(AT); 5984 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 5985 DSAT->getSizeModifier(), 0, 5986 SourceRange()); 5987 } 5988 5989 /// Attempt to unwrap two types that may both be array types with the same bound 5990 /// (or both be array types of unknown bound) for the purpose of comparing the 5991 /// cv-decomposition of two types per C++ [conv.qual]. 5992 /// 5993 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 5994 /// C++20 [conv.qual], if permitted by the current language mode. 5995 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 5996 bool AllowPiMismatch) { 5997 while (true) { 5998 auto *AT1 = getAsArrayType(T1); 5999 if (!AT1) 6000 return; 6001 6002 auto *AT2 = getAsArrayType(T2); 6003 if (!AT2) 6004 return; 6005 6006 // If we don't have two array types with the same constant bound nor two 6007 // incomplete array types, we've unwrapped everything we can. 6008 // C++20 also permits one type to be a constant array type and the other 6009 // to be an incomplete array type. 6010 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6011 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6012 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6013 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6014 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6015 isa<IncompleteArrayType>(AT2)))) 6016 return; 6017 } else if (isa<IncompleteArrayType>(AT1)) { 6018 if (!(isa<IncompleteArrayType>(AT2) || 6019 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6020 isa<ConstantArrayType>(AT2)))) 6021 return; 6022 } else { 6023 return; 6024 } 6025 6026 T1 = AT1->getElementType(); 6027 T2 = AT2->getElementType(); 6028 } 6029 } 6030 6031 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 
6032 /// 6033 /// If T1 and T2 are both pointer types of the same kind, or both array types 6034 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6035 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6036 /// 6037 /// This function will typically be called in a loop that successively 6038 /// "unwraps" pointer and pointer-to-member types to compare them at each 6039 /// level. 6040 /// 6041 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6042 /// C++20 [conv.qual], if permitted by the current language mode. 6043 /// 6044 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6045 /// pair of types that can't be unwrapped further. 6046 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6047 bool AllowPiMismatch) { 6048 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6049 6050 const auto *T1PtrType = T1->getAs<PointerType>(); 6051 const auto *T2PtrType = T2->getAs<PointerType>(); 6052 if (T1PtrType && T2PtrType) { 6053 T1 = T1PtrType->getPointeeType(); 6054 T2 = T2PtrType->getPointeeType(); 6055 return true; 6056 } 6057 6058 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6059 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6060 if (T1MPType && T2MPType && 6061 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6062 QualType(T2MPType->getClass(), 0))) { 6063 T1 = T1MPType->getPointeeType(); 6064 T2 = T2MPType->getPointeeType(); 6065 return true; 6066 } 6067 6068 if (getLangOpts().ObjC) { 6069 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6070 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6071 if (T1OPType && T2OPType) { 6072 T1 = T1OPType->getPointeeType(); 6073 T2 = T2OPType->getPointeeType(); 6074 return true; 6075 } 6076 } 6077 6078 // FIXME: Block pointers, too? 6079 6080 return false; 6081 } 6082 6083 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6084 while (true) { 6085 Qualifiers Quals; 6086 T1 = getUnqualifiedArrayType(T1, Quals); 6087 T2 = getUnqualifiedArrayType(T2, Quals); 6088 if (hasSameType(T1, T2)) 6089 return true; 6090 if (!UnwrapSimilarTypes(T1, T2)) 6091 return false; 6092 } 6093 } 6094 6095 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6096 while (true) { 6097 Qualifiers Quals1, Quals2; 6098 T1 = getUnqualifiedArrayType(T1, Quals1); 6099 T2 = getUnqualifiedArrayType(T2, Quals2); 6100 6101 Quals1.removeCVRQualifiers(); 6102 Quals2.removeCVRQualifiers(); 6103 if (Quals1 != Quals2) 6104 return false; 6105 6106 if (hasSameType(T1, T2)) 6107 return true; 6108 6109 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6110 return false; 6111 } 6112 } 6113 6114 DeclarationNameInfo 6115 ASTContext::getNameForTemplate(TemplateName Name, 6116 SourceLocation NameLoc) const { 6117 switch (Name.getKind()) { 6118 case TemplateName::QualifiedTemplate: 6119 case TemplateName::Template: 6120 // DNInfo work in progress: CHECKME: what about DNLoc? 6121 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6122 NameLoc); 6123 6124 case TemplateName::OverloadedTemplate: { 6125 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6126 // DNInfo work in progress: CHECKME: what about DNLoc? 
6127 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6128 } 6129 6130 case TemplateName::AssumedTemplate: { 6131 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6132 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6133 } 6134 6135 case TemplateName::DependentTemplate: { 6136 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6137 DeclarationName DName; 6138 if (DTN->isIdentifier()) { 6139 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6140 return DeclarationNameInfo(DName, NameLoc); 6141 } else { 6142 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6143 // DNInfo work in progress: FIXME: source locations? 6144 DeclarationNameLoc DNLoc = 6145 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6146 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6147 } 6148 } 6149 6150 case TemplateName::SubstTemplateTemplateParm: { 6151 SubstTemplateTemplateParmStorage *subst 6152 = Name.getAsSubstTemplateTemplateParm(); 6153 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6154 NameLoc); 6155 } 6156 6157 case TemplateName::SubstTemplateTemplateParmPack: { 6158 SubstTemplateTemplateParmPackStorage *subst 6159 = Name.getAsSubstTemplateTemplateParmPack(); 6160 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6161 NameLoc); 6162 } 6163 case TemplateName::UsingTemplate: 6164 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6165 NameLoc); 6166 } 6167 6168 llvm_unreachable("bad template name kind!"); 6169 } 6170 6171 TemplateName 6172 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6173 switch (Name.getKind()) { 6174 case TemplateName::UsingTemplate: 6175 case TemplateName::QualifiedTemplate: 6176 case TemplateName::Template: { 6177 TemplateDecl *Template = Name.getAsTemplateDecl(); 6178 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6179 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6180 6181 // The canonical template name is the canonical template declaration. 
6182 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6183 } 6184 6185 case TemplateName::OverloadedTemplate: 6186 case TemplateName::AssumedTemplate: 6187 llvm_unreachable("cannot canonicalize unresolved template"); 6188 6189 case TemplateName::DependentTemplate: { 6190 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6191 assert(DTN && "Non-dependent template names must refer to template decls."); 6192 return DTN->CanonicalTemplateName; 6193 } 6194 6195 case TemplateName::SubstTemplateTemplateParm: { 6196 SubstTemplateTemplateParmStorage *subst 6197 = Name.getAsSubstTemplateTemplateParm(); 6198 return getCanonicalTemplateName(subst->getReplacement()); 6199 } 6200 6201 case TemplateName::SubstTemplateTemplateParmPack: { 6202 SubstTemplateTemplateParmPackStorage *subst 6203 = Name.getAsSubstTemplateTemplateParmPack(); 6204 TemplateTemplateParmDecl *canonParameter 6205 = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); 6206 TemplateArgument canonArgPack 6207 = getCanonicalTemplateArgument(subst->getArgumentPack()); 6208 return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); 6209 } 6210 } 6211 6212 llvm_unreachable("bad template name!"); 6213 } 6214 6215 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6216 const TemplateName &Y) const { 6217 return getCanonicalTemplateName(X).getAsVoidPointer() == 6218 getCanonicalTemplateName(Y).getAsVoidPointer(); 6219 } 6220 6221 bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { 6222 if (!XCE != !YCE) 6223 return false; 6224 6225 if (!XCE) 6226 return true; 6227 6228 llvm::FoldingSetNodeID XCEID, YCEID; 6229 XCE->Profile(XCEID, *this, /*Canonical=*/true); 6230 YCE->Profile(YCEID, *this, /*Canonical=*/true); 6231 return XCEID == YCEID; 6232 } 6233 6234 bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, 6235 const TypeConstraint *YTC) const { 6236 if (!XTC != !YTC) 6237 return false; 6238 6239 if (!XTC) 6240 return true; 6241 6242 auto *NCX = XTC->getNamedConcept(); 6243 auto *NCY = YTC->getNamedConcept(); 6244 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6245 return false; 6246 if (XTC->hasExplicitTemplateArgs() != YTC->hasExplicitTemplateArgs()) 6247 return false; 6248 if (XTC->hasExplicitTemplateArgs()) 6249 if (XTC->getTemplateArgsAsWritten()->NumTemplateArgs != 6250 YTC->getTemplateArgsAsWritten()->NumTemplateArgs) 6251 return false; 6252 6253 // Compare slowly by profiling. 6254 // 6255 // We couldn't compare the profiling result for the template 6256 // args here. Consider the following example in different modules: 6257 // 6258 // template <__integer_like _Tp, C<_Tp> Sentinel> 6259 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const { 6260 // return __t; 6261 // } 6262 // 6263 // When we compare the profiling result for `C<_Tp>` in different 6264 // modules, it will compare the type of `_Tp` in different modules. 6265 // However, the type of `_Tp` in different modules refer to different 6266 // types here naturally. So we couldn't compare the profiling result 6267 // for the template args directly. 
6268 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), 6269 YTC->getImmediatelyDeclaredConstraint()); 6270 } 6271 6272 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6273 const NamedDecl *Y) const { 6274 if (X->getKind() != Y->getKind()) 6275 return false; 6276 6277 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6278 auto *TY = cast<TemplateTypeParmDecl>(Y); 6279 if (TX->isParameterPack() != TY->isParameterPack()) 6280 return false; 6281 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6282 return false; 6283 return isSameTypeConstraint(TX->getTypeConstraint(), 6284 TY->getTypeConstraint()); 6285 } 6286 6287 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6288 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6289 return TX->isParameterPack() == TY->isParameterPack() && 6290 TX->getASTContext().hasSameType(TX->getType(), TY->getType()); 6291 } 6292 6293 auto *TX = cast<TemplateTemplateParmDecl>(X); 6294 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6295 return TX->isParameterPack() == TY->isParameterPack() && 6296 isSameTemplateParameterList(TX->getTemplateParameters(), 6297 TY->getTemplateParameters()); 6298 } 6299 6300 bool ASTContext::isSameTemplateParameterList( 6301 const TemplateParameterList *X, const TemplateParameterList *Y) const { 6302 if (X->size() != Y->size()) 6303 return false; 6304 6305 for (unsigned I = 0, N = X->size(); I != N; ++I) 6306 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6307 return false; 6308 6309 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); 6310 } 6311 6312 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, 6313 const NamedDecl *Y) const { 6314 // If the type parameter isn't the same already, we don't need to check the 6315 // default argument further. 
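  // For illustration, two redeclarations such as
  //   template <typename T = int> struct A;   // in one module
  //   template <typename T = int> struct A;   // in another module
  // have matching default arguments, whereas a default of 'long' in one of
  // them would not match.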
6316 if (!isSameTemplateParameter(X, Y)) 6317 return false; 6318 6319 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { 6320 auto *TTPY = cast<TemplateTypeParmDecl>(Y); 6321 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6322 return false; 6323 6324 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); 6325 } 6326 6327 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6328 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); 6329 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) 6330 return false; 6331 6332 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); 6333 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); 6334 llvm::FoldingSetNodeID XID, YID; 6335 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); 6336 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); 6337 return XID == YID; 6338 } 6339 6340 auto *TTPX = cast<TemplateTemplateParmDecl>(X); 6341 auto *TTPY = cast<TemplateTemplateParmDecl>(Y); 6342 6343 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6344 return false; 6345 6346 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); 6347 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); 6348 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); 6349 } 6350 6351 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6352 if (auto *NS = X->getAsNamespace()) 6353 return NS; 6354 if (auto *NAS = X->getAsNamespaceAlias()) 6355 return NAS->getNamespace(); 6356 return nullptr; 6357 } 6358 6359 static bool isSameQualifier(const NestedNameSpecifier *X, 6360 const NestedNameSpecifier *Y) { 6361 if (auto *NSX = getNamespace(X)) { 6362 auto *NSY = getNamespace(Y); 6363 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6364 return false; 6365 } else if (X->getKind() != Y->getKind()) 6366 return false; 6367 6368 // FIXME: For namespaces and types, we're permitted to check that the entity 6369 // is named via the same tokens. We should probably do so. 6370 switch (X->getKind()) { 6371 case NestedNameSpecifier::Identifier: 6372 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6373 return false; 6374 break; 6375 case NestedNameSpecifier::Namespace: 6376 case NestedNameSpecifier::NamespaceAlias: 6377 // We've already checked that we named the same namespace. 6378 break; 6379 case NestedNameSpecifier::TypeSpec: 6380 case NestedNameSpecifier::TypeSpecWithTemplate: 6381 if (X->getAsType()->getCanonicalTypeInternal() != 6382 Y->getAsType()->getCanonicalTypeInternal()) 6383 return false; 6384 break; 6385 case NestedNameSpecifier::Global: 6386 case NestedNameSpecifier::Super: 6387 return true; 6388 } 6389 6390 // Recurse into earlier portion of NNS, if any. 6391 auto *PX = X->getPrefix(); 6392 auto *PY = Y->getPrefix(); 6393 if (PX && PY) 6394 return isSameQualifier(PX, PY); 6395 return !PX && !PY; 6396 } 6397 6398 /// Determine whether the attributes we can overload on are identical for A and 6399 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6400 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6401 const FunctionDecl *B) { 6402 // Note that pass_object_size attributes are represented in the function's 6403 // ExtParameterInfo, so we don't need to check them here. 
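  // As a sketch of what is compared here: two declarations like
  //   void f(int x) __attribute__((enable_if(x > 0, "")));
  //   void f(int x) __attribute__((enable_if(x > 0, "")));
  // agree because their enable_if conditions profile identically, while a
  // different condition (or a different number of enable_if attributes) on
  // one of them makes them distinct.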
6404 6405 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6406 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6407 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6408 6409 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6410 Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6411 Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6412 6413 // Return false if the number of enable_if attributes is different. 6414 if (!Cand1A || !Cand2A) 6415 return false; 6416 6417 Cand1ID.clear(); 6418 Cand2ID.clear(); 6419 6420 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6421 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6422 6423 // Return false if any of the enable_if expressions of A and B are 6424 // different. 6425 if (Cand1ID != Cand2ID) 6426 return false; 6427 } 6428 return true; 6429 } 6430 6431 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { 6432 if (X == Y) 6433 return true; 6434 6435 if (X->getDeclName() != Y->getDeclName()) 6436 return false; 6437 6438 // Must be in the same context. 6439 // 6440 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6441 // could be two different declarations of the same function. (We will fix the 6442 // semantic DC to refer to the primary definition after merging.) 6443 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6444 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6445 return false; 6446 6447 // Two typedefs refer to the same entity if they have the same underlying 6448 // type. 6449 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6450 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6451 return hasSameType(TypedefX->getUnderlyingType(), 6452 TypedefY->getUnderlyingType()); 6453 6454 // Must have the same kind. 6455 if (X->getKind() != Y->getKind()) 6456 return false; 6457 6458 // Objective-C classes and protocols with the same name always match. 6459 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6460 return true; 6461 6462 if (isa<ClassTemplateSpecializationDecl>(X)) { 6463 // No need to handle these here: we merge them when adding them to the 6464 // template. 6465 return false; 6466 } 6467 6468 // Compatible tags match. 6469 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6470 const auto *TagY = cast<TagDecl>(Y); 6471 return (TagX->getTagKind() == TagY->getTagKind()) || 6472 ((TagX->getTagKind() == TTK_Struct || 6473 TagX->getTagKind() == TTK_Class || 6474 TagX->getTagKind() == TTK_Interface) && 6475 (TagY->getTagKind() == TTK_Struct || 6476 TagY->getTagKind() == TTK_Class || 6477 TagY->getTagKind() == TTK_Interface)); 6478 } 6479 6480 // Functions with the same type and linkage match. 6481 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6482 // functions, etc. 6483 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) { 6484 const auto *FuncY = cast<FunctionDecl>(Y); 6485 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) { 6486 const auto *CtorY = cast<CXXConstructorDecl>(Y); 6487 if (CtorX->getInheritedConstructor() && 6488 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(), 6489 CtorY->getInheritedConstructor().getConstructor())) 6490 return false; 6491 } 6492 6493 if (FuncX->isMultiVersion() != FuncY->isMultiVersion()) 6494 return false; 6495 6496 // Multiversioned functions with different feature strings are represented 6497 // as separate declarations. 
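    // For example, the target-based variants
    //   int f(void) __attribute__((target("default")));
    //   int f(void) __attribute__((target("avx2")));
    // do not name the same entity even though they share a declaration name.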
    if (FuncX->isMultiVersion()) {
      const auto *TAX = FuncX->getAttr<TargetAttr>();
      const auto *TAY = FuncY->getAttr<TargetAttr>();
      assert(TAX && TAY && "Multiversion Function without target attribute");

      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
        return false;
    }

    if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
                              FuncY->getTrailingRequiresClause()))
      return false;

    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
      // Map to the first declaration that we've already merged into this one.
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(XT, YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
          hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(FuncX, FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(X)) {
    const auto *VarY = cast<VarDecl>(Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      if (hasSameType(VarX->getType(), VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. E.g.
      // template <typename T> struct S { static T Var[]; }; // #1
      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
      // This only happens when completing an incomplete array type. In this
      // case, when comparing #1 and #2, we should compare their element types.
      const ArrayType *VarXTy = getAsArrayType(VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
    const auto *TemplateY = cast<TemplateDecl>(Y);

    // ConceptDecls are not the same if their constraint expressions differ.
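    // For instance,
    //   template <class T> concept Small = sizeof(T) <= 4;   // in one module
    //   template <class T> concept Small = sizeof(T) <= 8;   // in another
    // declare different entities despite the identical name.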
6573 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) { 6574 const auto *ConceptY = cast<ConceptDecl>(Y); 6575 const Expr *XCE = ConceptX->getConstraintExpr(); 6576 const Expr *YCE = ConceptY->getConstraintExpr(); 6577 assert(XCE && YCE && "ConceptDecl without constraint expression?"); 6578 llvm::FoldingSetNodeID XID, YID; 6579 XCE->Profile(XID, *this, /*Canonical=*/true); 6580 YCE->Profile(YID, *this, /*Canonical=*/true); 6581 if (XID != YID) 6582 return false; 6583 } 6584 6585 return isSameEntity(TemplateX->getTemplatedDecl(), 6586 TemplateY->getTemplatedDecl()) && 6587 isSameTemplateParameterList(TemplateX->getTemplateParameters(), 6588 TemplateY->getTemplateParameters()); 6589 } 6590 6591 // Fields with the same name and the same type match. 6592 if (const auto *FDX = dyn_cast<FieldDecl>(X)) { 6593 const auto *FDY = cast<FieldDecl>(Y); 6594 // FIXME: Also check the bitwidth is odr-equivalent, if any. 6595 return hasSameType(FDX->getType(), FDY->getType()); 6596 } 6597 6598 // Indirect fields with the same target field match. 6599 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6600 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6601 return IFDX->getAnonField()->getCanonicalDecl() == 6602 IFDY->getAnonField()->getCanonicalDecl(); 6603 } 6604 6605 // Enumerators with the same name match. 6606 if (isa<EnumConstantDecl>(X)) 6607 // FIXME: Also check the value is odr-equivalent. 6608 return true; 6609 6610 // Using shadow declarations with the same target match. 6611 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6612 const auto *USY = cast<UsingShadowDecl>(Y); 6613 return USX->getTargetDecl() == USY->getTargetDecl(); 6614 } 6615 6616 // Using declarations with the same qualifier match. (We already know that 6617 // the name matches.) 6618 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6619 const auto *UY = cast<UsingDecl>(Y); 6620 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6621 UX->hasTypename() == UY->hasTypename() && 6622 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6623 } 6624 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6625 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6626 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6627 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6628 } 6629 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6630 return isSameQualifier( 6631 UX->getQualifier(), 6632 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6633 } 6634 6635 // Using-pack declarations are only created by instantiation, and match if 6636 // they're instantiated from matching UnresolvedUsing...Decls. 6637 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6638 return declaresSameEntity( 6639 UX->getInstantiatedFromUsingDecl(), 6640 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6641 } 6642 6643 // Namespace alias definitions with the same target match. 
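  // For example, 'namespace fs = std::filesystem;' appearing in two modules
  // introduces aliases that are merged because both name the same namespace.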
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Y);
    return NAX->getNamespace()->Equals(NAY->getNamespace());
  }

  return false;
}

TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    return Arg;

  case TemplateArgument::Expression:
    return Arg;

  case TemplateArgument::Declaration: {
    auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, Arg.getParamTypeForDecl());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                            /*isNullPtr*/true);

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));

  case TemplateArgument::TemplateExpansion:
    return TemplateArgument(getCanonicalTemplateName(
                                Arg.getAsTemplateOrTemplatePattern()),
                            Arg.getNumTemplateExpansions());

  case TemplateArgument::Integral:
    return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(Arg.getAsType()));

  case TemplateArgument::Pack: {
    if (Arg.pack_size() == 0)
      return Arg;

    auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
    unsigned Idx = 0;
    for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
                                         AEnd = Arg.pack_end();
         A != AEnd; (void)++A, ++Idx)
      CanonArgs[Idx] = getCanonicalTemplateArgument(*A);

    return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}

NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(
        *this, getCanonicalNestedNameSpecifier(NNS->getPrefix()),
        NNS->getAsIdentifier());

  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(
        *this, nullptr, NNS->getAsNamespace()->getOriginalNamespace());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(
        *this, nullptr,
        NNS->getAsNamespaceAlias()->getNamespace()->getOriginalNamespace());

  // The difference between TypeSpec and TypeSpecWithTemplate is that the
  // latter will have the 'template' keyword when printed.
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *T = getCanonicalType(NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(
          *this, DNT->getQualifier(),
          const_cast<IdentifierInfo *>(DNT->getIdentifier()));
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
      return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
                                         const_cast<Type *>(T));

    // TODO: Set 'Template' parameter to true for other template types.
    return NestedNameSpecifier::Create(*this, nullptr, false,
                                       const_cast<Type *>(T));
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}

const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
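  // For example, given
  //   typedef int IntArr[4];
  //   const IntArr x;
  // the type of 'x' is rebuilt here as an array of 'const int'.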
6793 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6794 6795 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6796 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6797 CAT->getSizeExpr(), 6798 CAT->getSizeModifier(), 6799 CAT->getIndexTypeCVRQualifiers())); 6800 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6801 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6802 IAT->getSizeModifier(), 6803 IAT->getIndexTypeCVRQualifiers())); 6804 6805 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6806 return cast<ArrayType>( 6807 getDependentSizedArrayType(NewEltTy, 6808 DSAT->getSizeExpr(), 6809 DSAT->getSizeModifier(), 6810 DSAT->getIndexTypeCVRQualifiers(), 6811 DSAT->getBracketsRange())); 6812 6813 const auto *VAT = cast<VariableArrayType>(ATy); 6814 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6815 VAT->getSizeExpr(), 6816 VAT->getSizeModifier(), 6817 VAT->getIndexTypeCVRQualifiers(), 6818 VAT->getBracketsRange())); 6819 } 6820 6821 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6822 if (T->isArrayType() || T->isFunctionType()) 6823 return getDecayedType(T); 6824 return T; 6825 } 6826 6827 QualType ASTContext::getSignatureParameterType(QualType T) const { 6828 T = getVariableArrayDecayedType(T); 6829 T = getAdjustedParameterType(T); 6830 return T.getUnqualifiedType(); 6831 } 6832 6833 QualType ASTContext::getExceptionObjectType(QualType T) const { 6834 // C++ [except.throw]p3: 6835 // A throw-expression initializes a temporary object, called the exception 6836 // object, the type of which is determined by removing any top-level 6837 // cv-qualifiers from the static type of the operand of throw and adjusting 6838 // the type from "array of T" or "function returning T" to "pointer to T" 6839 // or "pointer to function returning T", [...] 6840 T = getVariableArrayDecayedType(T); 6841 if (T->isArrayType() || T->isFunctionType()) 6842 T = getDecayedType(T); 6843 return T.getUnqualifiedType(); 6844 } 6845 6846 /// getArrayDecayedType - Return the properly qualified result of decaying the 6847 /// specified array type to a pointer. This operation is non-trivial when 6848 /// handling typedefs etc. The canonical type of "T" must be an array type, 6849 /// this returns a pointer to a properly qualified element of the array. 6850 /// 6851 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6852 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6853 // Get the element type with 'getAsArrayType' so that we don't lose any 6854 // typedefs in the element type of the array. This also handles propagation 6855 // of type qualifiers from the array type into the element type if present 6856 // (C99 6.7.3p8). 
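  // For example, an array spelled through a qualified typedef, such as
  //   typedef int A[4];
  //   const A x;
  // decays to 'const int *' rather than plain 'int *'.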
6857 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6858 assert(PrettyArrayType && "Not an array type!"); 6859 6860 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6861 6862 // int x[restrict 4] -> int *restrict 6863 QualType Result = getQualifiedType(PtrTy, 6864 PrettyArrayType->getIndexTypeQualifiers()); 6865 6866 // int x[_Nullable] -> int * _Nullable 6867 if (auto Nullability = Ty->getNullability(*this)) { 6868 Result = const_cast<ASTContext *>(this)->getAttributedType( 6869 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6870 } 6871 return Result; 6872 } 6873 6874 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6875 return getBaseElementType(array->getElementType()); 6876 } 6877 6878 QualType ASTContext::getBaseElementType(QualType type) const { 6879 Qualifiers qs; 6880 while (true) { 6881 SplitQualType split = type.getSplitDesugaredType(); 6882 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6883 if (!array) break; 6884 6885 type = array->getElementType(); 6886 qs.addConsistentQualifiers(split.Quals); 6887 } 6888 6889 return getQualifiedType(type, qs); 6890 } 6891 6892 /// getConstantArrayElementCount - Returns number of constant array elements. 6893 uint64_t 6894 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6895 uint64_t ElementCount = 1; 6896 do { 6897 ElementCount *= CA->getSize().getZExtValue(); 6898 CA = dyn_cast_or_null<ConstantArrayType>( 6899 CA->getElementType()->getAsArrayTypeUnsafe()); 6900 } while (CA); 6901 return ElementCount; 6902 } 6903 6904 /// getFloatingRank - Return a relative rank for floating point types. 6905 /// This routine will assert if passed a built-in type that isn't a float. 6906 static FloatingRank getFloatingRank(QualType T) { 6907 if (const auto *CT = T->getAs<ComplexType>()) 6908 return getFloatingRank(CT->getElementType()); 6909 6910 switch (T->castAs<BuiltinType>()->getKind()) { 6911 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6912 case BuiltinType::Float16: return Float16Rank; 6913 case BuiltinType::Half: return HalfRank; 6914 case BuiltinType::Float: return FloatRank; 6915 case BuiltinType::Double: return DoubleRank; 6916 case BuiltinType::LongDouble: return LongDoubleRank; 6917 case BuiltinType::Float128: return Float128Rank; 6918 case BuiltinType::BFloat16: return BFloat16Rank; 6919 case BuiltinType::Ibm128: return Ibm128Rank; 6920 } 6921 } 6922 6923 /// getFloatingTypeOrder - Compare the rank of the two specified floating 6924 /// point types, ignoring the domain of the type (i.e. 'double' == 6925 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 6926 /// LHS < RHS, return -1. 6927 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 6928 FloatingRank LHSR = getFloatingRank(LHS); 6929 FloatingRank RHSR = getFloatingRank(RHS); 6930 6931 if (LHSR == RHSR) 6932 return 0; 6933 if (LHSR > RHSR) 6934 return 1; 6935 return -1; 6936 } 6937 6938 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 6939 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 6940 return 0; 6941 return getFloatingTypeOrder(LHS, RHS); 6942 } 6943 6944 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 6945 /// routine will assert if passed a built-in type that isn't an integer or enum, 6946 /// or if it is not canonicalized. 
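/// As a rough sketch of the scheme used below: each type's rank is a small
/// ordinal plus its bit-width shifted left by three, so e.g. with a 32-bit
/// 'int' and a 16-bit 'short', 'int' (4 + (32 << 3)) outranks 'short'
/// (3 + (16 << 3)), and a '_BitInt' loses ties against any standard type of
/// the same width.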
6947 unsigned ASTContext::getIntegerRank(const Type *T) const { 6948 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 6949 6950 // Results in this 'losing' to any type of the same size, but winning if 6951 // larger. 6952 if (const auto *EIT = dyn_cast<BitIntType>(T)) 6953 return 0 + (EIT->getNumBits() << 3); 6954 6955 switch (cast<BuiltinType>(T)->getKind()) { 6956 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 6957 case BuiltinType::Bool: 6958 return 1 + (getIntWidth(BoolTy) << 3); 6959 case BuiltinType::Char_S: 6960 case BuiltinType::Char_U: 6961 case BuiltinType::SChar: 6962 case BuiltinType::UChar: 6963 return 2 + (getIntWidth(CharTy) << 3); 6964 case BuiltinType::Short: 6965 case BuiltinType::UShort: 6966 return 3 + (getIntWidth(ShortTy) << 3); 6967 case BuiltinType::Int: 6968 case BuiltinType::UInt: 6969 return 4 + (getIntWidth(IntTy) << 3); 6970 case BuiltinType::Long: 6971 case BuiltinType::ULong: 6972 return 5 + (getIntWidth(LongTy) << 3); 6973 case BuiltinType::LongLong: 6974 case BuiltinType::ULongLong: 6975 return 6 + (getIntWidth(LongLongTy) << 3); 6976 case BuiltinType::Int128: 6977 case BuiltinType::UInt128: 6978 return 7 + (getIntWidth(Int128Ty) << 3); 6979 } 6980 } 6981 6982 /// Whether this is a promotable bitfield reference according 6983 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 6984 /// 6985 /// \returns the type this bit-field will promote to, or NULL if no 6986 /// promotion occurs. 6987 QualType ASTContext::isPromotableBitField(Expr *E) const { 6988 if (E->isTypeDependent() || E->isValueDependent()) 6989 return {}; 6990 6991 // C++ [conv.prom]p5: 6992 // If the bit-field has an enumerated type, it is treated as any other 6993 // value of that type for promotion purposes. 6994 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 6995 return {}; 6996 6997 // FIXME: We should not do this unless E->refersToBitField() is true. This 6998 // matters in C where getSourceBitField() will find bit-fields for various 6999 // cases where the source expression is not a bit-field designator. 7000 7001 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 7002 if (!Field) 7003 return {}; 7004 7005 QualType FT = Field->getType(); 7006 7007 uint64_t BitWidth = Field->getBitWidthValue(*this); 7008 uint64_t IntSize = getTypeSize(IntTy); 7009 // C++ [conv.prom]p5: 7010 // A prvalue for an integral bit-field can be converted to a prvalue of type 7011 // int if int can represent all the values of the bit-field; otherwise, it 7012 // can be converted to unsigned int if unsigned int can represent all the 7013 // values of the bit-field. If the bit-field is larger yet, no integral 7014 // promotion applies to it. 7015 // C11 6.3.1.1/2: 7016 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 7017 // If an int can represent all values of the original type (as restricted by 7018 // the width, for a bit-field), the value is converted to an int; otherwise, 7019 // it is converted to an unsigned int. 7020 // 7021 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 7022 // We perform that promotion here to match GCC and C++. 7023 // FIXME: C does not permit promotion of an enum bit-field whose rank is 7024 // greater than that of 'int'. We perform that promotion to match GCC. 7025 if (BitWidth < IntSize) 7026 return IntTy; 7027 7028 if (BitWidth == IntSize) 7029 return FT->isSignedIntegerType() ? 
IntTy : UnsignedIntTy; 7030 7031 // Bit-fields wider than int are not subject to promotions, and therefore act 7032 // like the base type. GCC has some weird bugs in this area that we 7033 // deliberately do not follow (GCC follows a pre-standard resolution to 7034 // C's DR315 which treats bit-width as being part of the type, and this leaks 7035 // into their semantics in some cases). 7036 return {}; 7037 } 7038 7039 /// getPromotedIntegerType - Returns the type that Promotable will 7040 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 7041 /// integer type. 7042 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7043 assert(!Promotable.isNull()); 7044 assert(Promotable->isPromotableIntegerType()); 7045 if (const auto *ET = Promotable->getAs<EnumType>()) 7046 return ET->getDecl()->getPromotionType(); 7047 7048 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7049 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7050 // (3.9.1) can be converted to a prvalue of the first of the following 7051 // types that can represent all the values of its underlying type: 7052 // int, unsigned int, long int, unsigned long int, long long int, or 7053 // unsigned long long int [...] 7054 // FIXME: Is there some better way to compute this? 7055 if (BT->getKind() == BuiltinType::WChar_S || 7056 BT->getKind() == BuiltinType::WChar_U || 7057 BT->getKind() == BuiltinType::Char8 || 7058 BT->getKind() == BuiltinType::Char16 || 7059 BT->getKind() == BuiltinType::Char32) { 7060 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7061 uint64_t FromSize = getTypeSize(BT); 7062 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7063 LongLongTy, UnsignedLongLongTy }; 7064 for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { 7065 uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); 7066 if (FromSize < ToSize || 7067 (FromSize == ToSize && 7068 FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) 7069 return PromoteTypes[Idx]; 7070 } 7071 llvm_unreachable("char type should fit into long long"); 7072 } 7073 } 7074 7075 // At this point, we should have a signed or unsigned integer type. 7076 if (Promotable->isSignedIntegerType()) 7077 return IntTy; 7078 uint64_t PromotableSize = getIntWidth(Promotable); 7079 uint64_t IntSize = getIntWidth(IntTy); 7080 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 7081 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 7082 } 7083 7084 /// Recurses in pointer/array types until it finds an objc retainable 7085 /// type and returns its ownership. 7086 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 7087 while (!T.isNull()) { 7088 if (T.getObjCLifetime() != Qualifiers::OCL_None) 7089 return T.getObjCLifetime(); 7090 if (T->isArrayType()) 7091 T = getBaseElementType(T); 7092 else if (const auto *PT = T->getAs<PointerType>()) 7093 T = PT->getPointeeType(); 7094 else if (const auto *RT = T->getAs<ReferenceType>()) 7095 T = RT->getPointeeType(); 7096 else 7097 break; 7098 } 7099 7100 return Qualifiers::OCL_None; 7101 } 7102 7103 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 7104 // Incomplete enum types are not treated as integer types. 7105 // FIXME: In C++, enum types are never integer types. 
  if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
    return ET->getDecl()->getIntegerType().getTypePtr();
  return nullptr;
}

/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  if (const auto *ET = dyn_cast<EnumType>(LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(LHSC);
  unsigned RHSRank = getIntegerRank(RHSC);

  if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins. Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins. Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}

TypedefDecl *ASTContext::getCFConstantStringDecl() const {
  if (CFConstantStringTypeDecl)
    return CFConstantStringTypeDecl;

  assert(!CFConstantStringTagDecl &&
         "tag and typedef should be initialized together");
  CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
  CFConstantStringTagDecl->startDefinition();

  struct {
    QualType Type;
    const char *Name;
  } Fields[5];
  unsigned Count = 0;

  /// Objective-C ABI
  ///
  ///   typedef struct __NSConstantString_tag {
  ///     const int *isa;
  ///     int flags;
  ///     const char *str;
  ///     long length;
  ///   } __NSConstantString;
  ///
  /// Swift ABI (4.1, 4.2)
  ///
  ///   typedef struct __NSConstantString_tag {
  ///     uintptr_t _cfisa;
  ///     uintptr_t _swift_rc;
  ///     _Atomic(uint64_t) _cfinfoa;
  ///     const char *_ptr;
  ///     uint32_t _length;
  ///   } __NSConstantString;
  ///
  /// Swift ABI (5.0)
  ///
  ///   typedef struct __NSConstantString_tag {
  ///     uintptr_t _cfisa;
  ///     uintptr_t _swift_rc;
  ///     _Atomic(uint64_t) _cfinfoa;
  ///     const char *_ptr;
  ///     uintptr_t _length;
  ///   } __NSConstantString;

  const auto CFRuntime = getLangOpts().CFRuntime;
  if (static_cast<unsigned>(CFRuntime) <
      static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
    Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
    Fields[Count++] = { IntTy, "flags" };
    Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
    Fields[Count++] = { LongTy, "length" };
  } else {
    Fields[Count++] = { getUIntPtrType(), "_cfisa" };
    Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
    Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" };
    Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
    if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
        CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
      Fields[Count++] = { IntTy, "_length" };
    else
      Fields[Count++] = { getUIntPtrType(), "_length" };
  }

  // Create fields
  for (unsigned i = 0; i < Count; ++i) {
    FieldDecl *Field =
        FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
                          SourceLocation(), &Idents.get(Fields[i].Name),
                          Fields[i].Type, /*TInfo=*/nullptr,
                          /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    CFConstantStringTagDecl->addDecl(Field);
  }

  CFConstantStringTagDecl->completeDefinition();
  // This type is designed to be compatible with NSConstantString, but cannot
  // use the same name, since NSConstantString is an interface.
  auto tagType = getTagDeclType(CFConstantStringTagDecl);
  CFConstantStringTypeDecl =
      buildImplicitTypedef(tagType, "__NSConstantString");

  return CFConstantStringTypeDecl;
}

RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
  if (!CFConstantStringTagDecl)
    getCFConstantStringDecl(); // Build the tag and the typedef.
  return CFConstantStringTagDecl;
}

// getCFConstantStringType - Return the type used for constant CFStrings.
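// This is typically the layout given to the object that backs a constant
// string literal such as @"hello" or CFSTR("hello"); the literal expression
// itself keeps its usual pointer type.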
7250 QualType ASTContext::getCFConstantStringType() const { 7251 return getTypedefType(getCFConstantStringDecl()); 7252 } 7253 7254 QualType ASTContext::getObjCSuperType() const { 7255 if (ObjCSuperType.isNull()) { 7256 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7257 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7258 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7259 } 7260 return ObjCSuperType; 7261 } 7262 7263 void ASTContext::setCFConstantStringType(QualType T) { 7264 const auto *TD = T->castAs<TypedefType>(); 7265 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7266 const auto *TagType = 7267 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7268 CFConstantStringTagDecl = TagType->getDecl(); 7269 } 7270 7271 QualType ASTContext::getBlockDescriptorType() const { 7272 if (BlockDescriptorType) 7273 return getTagDeclType(BlockDescriptorType); 7274 7275 RecordDecl *RD; 7276 // FIXME: Needs the FlagAppleBlock bit. 7277 RD = buildImplicitRecord("__block_descriptor"); 7278 RD->startDefinition(); 7279 7280 QualType FieldTypes[] = { 7281 UnsignedLongTy, 7282 UnsignedLongTy, 7283 }; 7284 7285 static const char *const FieldNames[] = { 7286 "reserved", 7287 "Size" 7288 }; 7289 7290 for (size_t i = 0; i < 2; ++i) { 7291 FieldDecl *Field = FieldDecl::Create( 7292 *this, RD, SourceLocation(), SourceLocation(), 7293 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7294 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7295 Field->setAccess(AS_public); 7296 RD->addDecl(Field); 7297 } 7298 7299 RD->completeDefinition(); 7300 7301 BlockDescriptorType = RD; 7302 7303 return getTagDeclType(BlockDescriptorType); 7304 } 7305 7306 QualType ASTContext::getBlockDescriptorExtendedType() const { 7307 if (BlockDescriptorExtendedType) 7308 return getTagDeclType(BlockDescriptorExtendedType); 7309 7310 RecordDecl *RD; 7311 // FIXME: Needs the FlagAppleBlock bit. 
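  // Roughly, the record built here corresponds to
  //   struct __block_descriptor_withcopydispose {
  //     unsigned long reserved;
  //     unsigned long Size;
  //     void **CopyFuncPtr;     // copy helper
  //     void **DestroyFuncPtr;  // dispose helper
  //   };
  // with the helper pointers modelled as 'void **' below.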
7312 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7313 RD->startDefinition(); 7314 7315 QualType FieldTypes[] = { 7316 UnsignedLongTy, 7317 UnsignedLongTy, 7318 getPointerType(VoidPtrTy), 7319 getPointerType(VoidPtrTy) 7320 }; 7321 7322 static const char *const FieldNames[] = { 7323 "reserved", 7324 "Size", 7325 "CopyFuncPtr", 7326 "DestroyFuncPtr" 7327 }; 7328 7329 for (size_t i = 0; i < 4; ++i) { 7330 FieldDecl *Field = FieldDecl::Create( 7331 *this, RD, SourceLocation(), SourceLocation(), 7332 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7333 /*BitWidth=*/nullptr, 7334 /*Mutable=*/false, ICIS_NoInit); 7335 Field->setAccess(AS_public); 7336 RD->addDecl(Field); 7337 } 7338 7339 RD->completeDefinition(); 7340 7341 BlockDescriptorExtendedType = RD; 7342 return getTagDeclType(BlockDescriptorExtendedType); 7343 } 7344 7345 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7346 const auto *BT = dyn_cast<BuiltinType>(T); 7347 7348 if (!BT) { 7349 if (isa<PipeType>(T)) 7350 return OCLTK_Pipe; 7351 7352 return OCLTK_Default; 7353 } 7354 7355 switch (BT->getKind()) { 7356 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7357 case BuiltinType::Id: \ 7358 return OCLTK_Image; 7359 #include "clang/Basic/OpenCLImageTypes.def" 7360 7361 case BuiltinType::OCLClkEvent: 7362 return OCLTK_ClkEvent; 7363 7364 case BuiltinType::OCLEvent: 7365 return OCLTK_Event; 7366 7367 case BuiltinType::OCLQueue: 7368 return OCLTK_Queue; 7369 7370 case BuiltinType::OCLReserveID: 7371 return OCLTK_ReserveID; 7372 7373 case BuiltinType::OCLSampler: 7374 return OCLTK_Sampler; 7375 7376 default: 7377 return OCLTK_Default; 7378 } 7379 } 7380 7381 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7382 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7383 } 7384 7385 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7386 /// requires copy/dispose. Note that this must match the logic 7387 /// in buildByrefHelpers. 7388 bool ASTContext::BlockRequiresCopying(QualType Ty, 7389 const VarDecl *D) { 7390 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7391 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7392 if (!copyExpr && record->hasTrivialDestructor()) return false; 7393 7394 return true; 7395 } 7396 7397 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7398 // move or destroy. 7399 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7400 return true; 7401 7402 if (!Ty->isObjCRetainableType()) return false; 7403 7404 Qualifiers qs = Ty.getQualifiers(); 7405 7406 // If we have lifetime, that dominates. 7407 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7408 switch (lifetime) { 7409 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7410 7411 // These are just bits as far as the runtime is concerned. 7412 case Qualifiers::OCL_ExplicitNone: 7413 case Qualifiers::OCL_Autoreleasing: 7414 return false; 7415 7416 // These cases should have been taken care of when checking the type's 7417 // non-triviality. 
7418 case Qualifiers::OCL_Weak: 7419 case Qualifiers::OCL_Strong: 7420 llvm_unreachable("impossible"); 7421 } 7422 llvm_unreachable("fell out of lifetime switch!"); 7423 } 7424 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7425 Ty->isObjCObjectPointerType()); 7426 } 7427 7428 bool ASTContext::getByrefLifetime(QualType Ty, 7429 Qualifiers::ObjCLifetime &LifeTime, 7430 bool &HasByrefExtendedLayout) const { 7431 if (!getLangOpts().ObjC || 7432 getLangOpts().getGC() != LangOptions::NonGC) 7433 return false; 7434 7435 HasByrefExtendedLayout = false; 7436 if (Ty->isRecordType()) { 7437 HasByrefExtendedLayout = true; 7438 LifeTime = Qualifiers::OCL_None; 7439 } else if ((LifeTime = Ty.getObjCLifetime())) { 7440 // Honor the ARC qualifiers. 7441 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7442 // The MRR rule. 7443 LifeTime = Qualifiers::OCL_ExplicitNone; 7444 } else { 7445 LifeTime = Qualifiers::OCL_None; 7446 } 7447 return true; 7448 } 7449 7450 CanQualType ASTContext::getNSUIntegerType() const { 7451 assert(Target && "Expected target to be initialized"); 7452 const llvm::Triple &T = Target->getTriple(); 7453 // Windows is LLP64 rather than LP64 7454 if (T.isOSWindows() && T.isArch64Bit()) 7455 return UnsignedLongLongTy; 7456 return UnsignedLongTy; 7457 } 7458 7459 CanQualType ASTContext::getNSIntegerType() const { 7460 assert(Target && "Expected target to be initialized"); 7461 const llvm::Triple &T = Target->getTriple(); 7462 // Windows is LLP64 rather than LP64 7463 if (T.isOSWindows() && T.isArch64Bit()) 7464 return LongLongTy; 7465 return LongTy; 7466 } 7467 7468 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7469 if (!ObjCInstanceTypeDecl) 7470 ObjCInstanceTypeDecl = 7471 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7472 return ObjCInstanceTypeDecl; 7473 } 7474 7475 // This returns true if a type has been typedefed to BOOL: 7476 // typedef <type> BOOL; 7477 static bool isTypeTypedefedAsBOOL(QualType T) { 7478 if (const auto *TT = dyn_cast<TypedefType>(T)) 7479 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7480 return II->isStr("BOOL"); 7481 7482 return false; 7483 } 7484 7485 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7486 /// purpose. 7487 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7488 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7489 return CharUnits::Zero(); 7490 7491 CharUnits sz = getTypeSizeInChars(type); 7492 7493 // Make all integer and enum types at least as large as an int 7494 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7495 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7496 // Treat arrays as pointers, since that's how they're passed in. 7497 else if (type->isArrayType()) 7498 sz = getTypeSizeInChars(VoidPtrTy); 7499 return sz; 7500 } 7501 7502 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7503 return getTargetInfo().getCXXABI().isMicrosoft() && 7504 VD->isStaticDataMember() && 7505 VD->getType()->isIntegralOrEnumerationType() && 7506 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7507 } 7508 7509 ASTContext::InlineVariableDefinitionKind 7510 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7511 if (!VD->isInline()) 7512 return InlineVariableDefinitionKind::None; 7513 7514 // In almost all cases, it's a weak definition. 
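  // A sketch of the distinction this function draws (C++17):
  //   struct S { static constexpr int x = 1; };  // implicitly inline member
  //   constexpr int S::x;                        // file-scope redeclaration
  // The out-of-line redeclaration makes the definition non-discardable
  // (Strong); without it, the implicitly inline member stays weak.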
7515 auto *First = VD->getFirstDecl(); 7516 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7517 return InlineVariableDefinitionKind::Weak; 7518 7519 // If there's a file-context declaration in this translation unit, it's a 7520 // non-discardable definition. 7521 for (auto *D : VD->redecls()) 7522 if (D->getLexicalDeclContext()->isFileContext() && 7523 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7524 return InlineVariableDefinitionKind::Strong; 7525 7526 // If we've not seen one yet, we don't know. 7527 return InlineVariableDefinitionKind::WeakUnknown; 7528 } 7529 7530 static std::string charUnitsToString(const CharUnits &CU) { 7531 return llvm::itostr(CU.getQuantity()); 7532 } 7533 7534 /// getObjCEncodingForBlock - Return the encoded type for this block 7535 /// declaration. 7536 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7537 std::string S; 7538 7539 const BlockDecl *Decl = Expr->getBlockDecl(); 7540 QualType BlockTy = 7541 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7542 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7543 // Encode result type. 7544 if (getLangOpts().EncodeExtendedBlockSig) 7545 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7546 true /*Extended*/); 7547 else 7548 getObjCEncodingForType(BlockReturnTy, S); 7549 // Compute size of all parameters. 7550 // Start with computing size of a pointer in number of bytes. 7551 // FIXME: There might(should) be a better way of doing this computation! 7552 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7553 CharUnits ParmOffset = PtrSize; 7554 for (auto PI : Decl->parameters()) { 7555 QualType PType = PI->getType(); 7556 CharUnits sz = getObjCEncodingTypeSize(PType); 7557 if (sz.isZero()) 7558 continue; 7559 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7560 ParmOffset += sz; 7561 } 7562 // Size of the argument frame 7563 S += charUnitsToString(ParmOffset); 7564 // Block pointer and offset. 7565 S += "@?0"; 7566 7567 // Argument types. 7568 ParmOffset = PtrSize; 7569 for (auto PVDecl : Decl->parameters()) { 7570 QualType PType = PVDecl->getOriginalType(); 7571 if (const auto *AT = 7572 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7573 // Use array's original type only if it has known number of 7574 // elements. 7575 if (!isa<ConstantArrayType>(AT)) 7576 PType = PVDecl->getType(); 7577 } else if (PType->isFunctionType()) 7578 PType = PVDecl->getType(); 7579 if (getLangOpts().EncodeExtendedBlockSig) 7580 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7581 S, true /*Extended*/); 7582 else 7583 getObjCEncodingForType(PType, S); 7584 S += charUnitsToString(ParmOffset); 7585 ParmOffset += getObjCEncodingTypeSize(PType); 7586 } 7587 7588 return S; 7589 } 7590 7591 std::string 7592 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7593 std::string S; 7594 // Encode result type. 7595 getObjCEncodingForType(Decl->getReturnType(), S); 7596 CharUnits ParmOffset; 7597 // Compute size of all parameters. 7598 for (auto PI : Decl->parameters()) { 7599 QualType PType = PI->getType(); 7600 CharUnits sz = getObjCEncodingTypeSize(PType); 7601 if (sz.isZero()) 7602 continue; 7603 7604 assert(sz.isPositive() && 7605 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7606 ParmOffset += sz; 7607 } 7608 S += charUnitsToString(ParmOffset); 7609 ParmOffset = CharUnits::Zero(); 7610 7611 // Argument types. 
7612 for (auto PVDecl : Decl->parameters()) { 7613 QualType PType = PVDecl->getOriginalType(); 7614 if (const auto *AT = 7615 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7616 // Use array's original type only if it has known number of 7617 // elements. 7618 if (!isa<ConstantArrayType>(AT)) 7619 PType = PVDecl->getType(); 7620 } else if (PType->isFunctionType()) 7621 PType = PVDecl->getType(); 7622 getObjCEncodingForType(PType, S); 7623 S += charUnitsToString(ParmOffset); 7624 ParmOffset += getObjCEncodingTypeSize(PType); 7625 } 7626 7627 return S; 7628 } 7629 7630 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7631 /// method parameter or return type. If Extended, include class names and 7632 /// block object types. 7633 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7634 QualType T, std::string& S, 7635 bool Extended) const { 7636 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7637 getObjCEncodingForTypeQualifier(QT, S); 7638 // Encode parameter type. 7639 ObjCEncOptions Options = ObjCEncOptions() 7640 .setExpandPointedToStructures() 7641 .setExpandStructures() 7642 .setIsOutermostType(); 7643 if (Extended) 7644 Options.setEncodeBlockParameters().setEncodeClassNames(); 7645 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7646 } 7647 7648 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7649 /// declaration. 7650 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7651 bool Extended) const { 7652 // FIXME: This is not very efficient. 7653 // Encode return type. 7654 std::string S; 7655 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7656 Decl->getReturnType(), S, Extended); 7657 // Compute size of all parameters. 7658 // Start with computing size of a pointer in number of bytes. 7659 // FIXME: There might(should) be a better way of doing this computation! 7660 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7661 // The first two arguments (self and _cmd) are pointers; account for 7662 // their size. 7663 CharUnits ParmOffset = 2 * PtrSize; 7664 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7665 E = Decl->sel_param_end(); PI != E; ++PI) { 7666 QualType PType = (*PI)->getType(); 7667 CharUnits sz = getObjCEncodingTypeSize(PType); 7668 if (sz.isZero()) 7669 continue; 7670 7671 assert(sz.isPositive() && 7672 "getObjCEncodingForMethodDecl - Incomplete param type"); 7673 ParmOffset += sz; 7674 } 7675 S += charUnitsToString(ParmOffset); 7676 S += "@0:"; 7677 S += charUnitsToString(PtrSize); 7678 7679 // Argument types. 7680 ParmOffset = 2 * PtrSize; 7681 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7682 E = Decl->sel_param_end(); PI != E; ++PI) { 7683 const ParmVarDecl *PVDecl = *PI; 7684 QualType PType = PVDecl->getOriginalType(); 7685 if (const auto *AT = 7686 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7687 // Use array's original type only if it has known number of 7688 // elements. 
7689 if (!isa<ConstantArrayType>(AT)) 7690 PType = PVDecl->getType(); 7691 } else if (PType->isFunctionType()) 7692 PType = PVDecl->getType(); 7693 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7694 PType, S, Extended); 7695 S += charUnitsToString(ParmOffset); 7696 ParmOffset += getObjCEncodingTypeSize(PType); 7697 } 7698 7699 return S; 7700 } 7701 7702 ObjCPropertyImplDecl * 7703 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7704 const ObjCPropertyDecl *PD, 7705 const Decl *Container) const { 7706 if (!Container) 7707 return nullptr; 7708 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7709 for (auto *PID : CID->property_impls()) 7710 if (PID->getPropertyDecl() == PD) 7711 return PID; 7712 } else { 7713 const auto *OID = cast<ObjCImplementationDecl>(Container); 7714 for (auto *PID : OID->property_impls()) 7715 if (PID->getPropertyDecl() == PD) 7716 return PID; 7717 } 7718 return nullptr; 7719 } 7720 7721 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7722 /// property declaration. If non-NULL, Container must be either an 7723 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7724 /// NULL when getting encodings for protocol properties. 7725 /// Property attributes are stored as a comma-delimited C string. The simple 7726 /// attributes readonly and bycopy are encoded as single characters. The 7727 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7728 /// encoded as single characters, followed by an identifier. Property types 7729 /// are also encoded as a parametrized attribute. The characters used to encode 7730 /// these attributes are defined by the following enumeration: 7731 /// @code 7732 /// enum PropertyAttributes { 7733 /// kPropertyReadOnly = 'R', // property is read-only. 7734 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7735 /// kPropertyByref = '&', // property is a reference to the value last assigned 7736 /// kPropertyDynamic = 'D', // property is dynamic 7737 /// kPropertyGetter = 'G', // followed by getter selector name 7738 /// kPropertySetter = 'S', // followed by setter selector name 7739 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7740 /// kPropertyType = 'T' // followed by old-style type encoding. 7741 /// kPropertyWeak = 'W' // 'weak' property 7742 /// kPropertyStrong = 'P' // property GC'able 7743 /// kPropertyNonAtomic = 'N' // property non-atomic 7744 /// }; 7745 /// @endcode 7746 std::string 7747 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7748 const Decl *Container) const { 7749 // Collect information from the property implementation decl(s). 7750 bool Dynamic = false; 7751 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7752 7753 if (ObjCPropertyImplDecl *PropertyImpDecl = 7754 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7755 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7756 Dynamic = true; 7757 else 7758 SynthesizePID = PropertyImpDecl; 7759 } 7760 7761 // FIXME: This is not very efficient. 7762 std::string S = "T"; 7763 7764 // Encode result type. 7765 // GCC has some special rules regarding encoding of properties which 7766 // closely resembles encoding of ivars. 
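  // As an example of the shape of the result (assuming a synthesized backing
  // ivar named '_name'):
  //   @property (nonatomic, copy) NSString *name;
  // typically encodes as "T@\"NSString\",C,N,V_name".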
7767 getObjCEncodingForPropertyType(PD->getType(), S); 7768 7769 if (PD->isReadOnly()) { 7770 S += ",R"; 7771 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7772 S += ",C"; 7773 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7774 S += ",&"; 7775 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7776 S += ",W"; 7777 } else { 7778 switch (PD->getSetterKind()) { 7779 case ObjCPropertyDecl::Assign: break; 7780 case ObjCPropertyDecl::Copy: S += ",C"; break; 7781 case ObjCPropertyDecl::Retain: S += ",&"; break; 7782 case ObjCPropertyDecl::Weak: S += ",W"; break; 7783 } 7784 } 7785 7786 // It really isn't clear at all what this means, since properties 7787 // are "dynamic by default". 7788 if (Dynamic) 7789 S += ",D"; 7790 7791 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7792 S += ",N"; 7793 7794 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7795 S += ",G"; 7796 S += PD->getGetterName().getAsString(); 7797 } 7798 7799 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7800 S += ",S"; 7801 S += PD->getSetterName().getAsString(); 7802 } 7803 7804 if (SynthesizePID) { 7805 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7806 S += ",V"; 7807 S += OID->getNameAsString(); 7808 } 7809 7810 // FIXME: OBJCGC: weak & strong 7811 return S; 7812 } 7813 7814 /// getLegacyIntegralTypeEncoding - 7815 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7816 /// 'l' or 'L' , but not always. For typedefs, we need to use 7817 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7818 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7819 if (isa<TypedefType>(PointeeTy.getTypePtr())) { 7820 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7821 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7822 PointeeTy = UnsignedIntTy; 7823 else 7824 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7825 PointeeTy = IntTy; 7826 } 7827 } 7828 } 7829 7830 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7831 const FieldDecl *Field, 7832 QualType *NotEncodedT) const { 7833 // We follow the behavior of gcc, expanding structures which are 7834 // directly pointed to, and expanding embedded structures. Note that 7835 // these rules are sufficient to prevent recursive encoding of the 7836 // same type. 7837 getObjCEncodingForTypeImpl(T, S, 7838 ObjCEncOptions() 7839 .setExpandPointedToStructures() 7840 .setExpandStructures() 7841 .setIsOutermostType(), 7842 Field, NotEncodedT); 7843 } 7844 7845 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7846 std::string& S) const { 7847 // Encode result type. 7848 // GCC has some special rules regarding encoding of properties which 7849 // closely resembles encoding of ivars. 
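// Illustrative example: an 'NSString *' property type encodes here as
// @"NSString" (class name included), whereas a plain @encode(NSString *)
// written in source yields just "@"; class names are only emitted for
// ivars, properties and the extended block/method encodings.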
7850 getObjCEncodingForTypeImpl(T, S, 7851 ObjCEncOptions() 7852 .setExpandPointedToStructures() 7853 .setExpandStructures() 7854 .setIsOutermostType() 7855 .setEncodingProperty(), 7856 /*Field=*/nullptr); 7857 } 7858 7859 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7860 const BuiltinType *BT) { 7861 BuiltinType::Kind kind = BT->getKind(); 7862 switch (kind) { 7863 case BuiltinType::Void: return 'v'; 7864 case BuiltinType::Bool: return 'B'; 7865 case BuiltinType::Char8: 7866 case BuiltinType::Char_U: 7867 case BuiltinType::UChar: return 'C'; 7868 case BuiltinType::Char16: 7869 case BuiltinType::UShort: return 'S'; 7870 case BuiltinType::Char32: 7871 case BuiltinType::UInt: return 'I'; 7872 case BuiltinType::ULong: 7873 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7874 case BuiltinType::UInt128: return 'T'; 7875 case BuiltinType::ULongLong: return 'Q'; 7876 case BuiltinType::Char_S: 7877 case BuiltinType::SChar: return 'c'; 7878 case BuiltinType::Short: return 's'; 7879 case BuiltinType::WChar_S: 7880 case BuiltinType::WChar_U: 7881 case BuiltinType::Int: return 'i'; 7882 case BuiltinType::Long: 7883 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7884 case BuiltinType::LongLong: return 'q'; 7885 case BuiltinType::Int128: return 't'; 7886 case BuiltinType::Float: return 'f'; 7887 case BuiltinType::Double: return 'd'; 7888 case BuiltinType::LongDouble: return 'D'; 7889 case BuiltinType::NullPtr: return '*'; // like char* 7890 7891 case BuiltinType::BFloat16: 7892 case BuiltinType::Float16: 7893 case BuiltinType::Float128: 7894 case BuiltinType::Ibm128: 7895 case BuiltinType::Half: 7896 case BuiltinType::ShortAccum: 7897 case BuiltinType::Accum: 7898 case BuiltinType::LongAccum: 7899 case BuiltinType::UShortAccum: 7900 case BuiltinType::UAccum: 7901 case BuiltinType::ULongAccum: 7902 case BuiltinType::ShortFract: 7903 case BuiltinType::Fract: 7904 case BuiltinType::LongFract: 7905 case BuiltinType::UShortFract: 7906 case BuiltinType::UFract: 7907 case BuiltinType::ULongFract: 7908 case BuiltinType::SatShortAccum: 7909 case BuiltinType::SatAccum: 7910 case BuiltinType::SatLongAccum: 7911 case BuiltinType::SatUShortAccum: 7912 case BuiltinType::SatUAccum: 7913 case BuiltinType::SatULongAccum: 7914 case BuiltinType::SatShortFract: 7915 case BuiltinType::SatFract: 7916 case BuiltinType::SatLongFract: 7917 case BuiltinType::SatUShortFract: 7918 case BuiltinType::SatUFract: 7919 case BuiltinType::SatULongFract: 7920 // FIXME: potentially need @encodes for these! 7921 return ' '; 7922 7923 #define SVE_TYPE(Name, Id, SingletonId) \ 7924 case BuiltinType::Id: 7925 #include "clang/Basic/AArch64SVEACLETypes.def" 7926 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 7927 #include "clang/Basic/RISCVVTypes.def" 7928 { 7929 DiagnosticsEngine &Diags = C->getDiagnostics(); 7930 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 7931 "cannot yet @encode type %0"); 7932 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 7933 return ' '; 7934 } 7935 7936 case BuiltinType::ObjCId: 7937 case BuiltinType::ObjCClass: 7938 case BuiltinType::ObjCSel: 7939 llvm_unreachable("@encoding ObjC primitive type"); 7940 7941 // OpenCL and placeholder types don't need @encodings. 
7942 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7943 case BuiltinType::Id: 7944 #include "clang/Basic/OpenCLImageTypes.def" 7945 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 7946 case BuiltinType::Id: 7947 #include "clang/Basic/OpenCLExtensionTypes.def" 7948 case BuiltinType::OCLEvent: 7949 case BuiltinType::OCLClkEvent: 7950 case BuiltinType::OCLQueue: 7951 case BuiltinType::OCLReserveID: 7952 case BuiltinType::OCLSampler: 7953 case BuiltinType::Dependent: 7954 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 7955 case BuiltinType::Id: 7956 #include "clang/Basic/PPCTypes.def" 7957 #define BUILTIN_TYPE(KIND, ID) 7958 #define PLACEHOLDER_TYPE(KIND, ID) \ 7959 case BuiltinType::KIND: 7960 #include "clang/AST/BuiltinTypes.def" 7961 llvm_unreachable("invalid builtin type for @encode"); 7962 } 7963 llvm_unreachable("invalid BuiltinType::Kind value"); 7964 } 7965 7966 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 7967 EnumDecl *Enum = ET->getDecl(); 7968 7969 // The encoding of an non-fixed enum type is always 'i', regardless of size. 7970 if (!Enum->isFixed()) 7971 return 'i'; 7972 7973 // The encoding of a fixed enum type matches its fixed underlying type. 7974 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 7975 return getObjCEncodingForPrimitiveType(C, BT); 7976 } 7977 7978 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 7979 QualType T, const FieldDecl *FD) { 7980 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 7981 S += 'b'; 7982 // The NeXT runtime encodes bit fields as b followed by the number of bits. 7983 // The GNU runtime requires more information; bitfields are encoded as b, 7984 // then the offset (in bits) of the first element, then the type of the 7985 // bitfield, then the size in bits. For example, in this structure: 7986 // 7987 // struct 7988 // { 7989 // int integer; 7990 // int flags:2; 7991 // }; 7992 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 7993 // runtime, but b32i2 for the GNU runtime. The reason for this extra 7994 // information is not especially sensible, but we're stuck with it for 7995 // compatibility with GCC, although providing it breaks anything that 7996 // actually uses runtime introspection and wants to work on both runtimes... 7997 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 7998 uint64_t Offset; 7999 8000 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8001 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8002 IVD); 8003 } else { 8004 const RecordDecl *RD = FD->getParent(); 8005 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8006 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8007 } 8008 8009 S += llvm::utostr(Offset); 8010 8011 if (const auto *ET = T->getAs<EnumType>()) 8012 S += ObjCEncodingForEnumType(Ctx, ET); 8013 else { 8014 const auto *BT = T->castAs<BuiltinType>(); 8015 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8016 } 8017 } 8018 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8019 } 8020 8021 // Helper function for determining whether the encoded type string would include 8022 // a template specialization type. 
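// Illustrative sketch (hypothetical type): given
//   struct Payload { std::vector<int> Data; };
// a 'Payload *' pointee reports true here, and (unless encoding of class
// template specializations is explicitly enabled) such pointers are emitted
// as the opaque "^v" in the pointer case below.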
8023 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8024 bool VisitBasesAndFields) { 8025 T = T->getBaseElementTypeUnsafe(); 8026 8027 if (auto *PT = T->getAs<PointerType>()) 8028 return hasTemplateSpecializationInEncodedString( 8029 PT->getPointeeType().getTypePtr(), false); 8030 8031 auto *CXXRD = T->getAsCXXRecordDecl(); 8032 8033 if (!CXXRD) 8034 return false; 8035 8036 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8037 return true; 8038 8039 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8040 return false; 8041 8042 for (auto B : CXXRD->bases()) 8043 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8044 true)) 8045 return true; 8046 8047 for (auto *FD : CXXRD->fields()) 8048 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8049 true)) 8050 return true; 8051 8052 return false; 8053 } 8054 8055 // FIXME: Use SmallString for accumulating string. 8056 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8057 const ObjCEncOptions Options, 8058 const FieldDecl *FD, 8059 QualType *NotEncodedT) const { 8060 CanQualType CT = getCanonicalType(T); 8061 switch (CT->getTypeClass()) { 8062 case Type::Builtin: 8063 case Type::Enum: 8064 if (FD && FD->isBitField()) 8065 return EncodeBitField(this, S, T, FD); 8066 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8067 S += getObjCEncodingForPrimitiveType(this, BT); 8068 else 8069 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8070 return; 8071 8072 case Type::Complex: 8073 S += 'j'; 8074 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8075 ObjCEncOptions(), 8076 /*Field=*/nullptr); 8077 return; 8078 8079 case Type::Atomic: 8080 S += 'A'; 8081 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8082 ObjCEncOptions(), 8083 /*Field=*/nullptr); 8084 return; 8085 8086 // encoding for pointer or reference types. 8087 case Type::Pointer: 8088 case Type::LValueReference: 8089 case Type::RValueReference: { 8090 QualType PointeeTy; 8091 if (isa<PointerType>(CT)) { 8092 const auto *PT = T->castAs<PointerType>(); 8093 if (PT->isObjCSelType()) { 8094 S += ':'; 8095 return; 8096 } 8097 PointeeTy = PT->getPointeeType(); 8098 } else { 8099 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8100 } 8101 8102 bool isReadOnly = false; 8103 // For historical/compatibility reasons, the read-only qualifier of the 8104 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8105 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8106 // Also, do not emit the 'r' for anything but the outermost type! 8107 if (isa<TypedefType>(T.getTypePtr())) { 8108 if (Options.IsOutermostType() && T.isConstQualified()) { 8109 isReadOnly = true; 8110 S += 'r'; 8111 } 8112 } else if (Options.IsOutermostType()) { 8113 QualType P = PointeeTy; 8114 while (auto PT = P->getAs<PointerType>()) 8115 P = PT->getPointeeType(); 8116 if (P.isConstQualified()) { 8117 isReadOnly = true; 8118 S += 'r'; 8119 } 8120 } 8121 if (isReadOnly) { 8122 // Another legacy compatibility encoding. Some ObjC qualifier and type 8123 // combinations need to be rearranged. 8124 // Rewrite "in const" from "nr" to "rn" 8125 if (StringRef(S).endswith("nr")) 8126 S.replace(S.end()-2, S.end(), "rn"); 8127 } 8128 8129 if (PointeeTy->isCharType()) { 8130 // char pointer types should be encoded as '*' unless it is a 8131 // type that has been typedef'd to 'BOOL'. 
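// Illustrative example: 'char *' encodes as "*", but 'BOOL *' (where BOOL
// is the traditional typedef of signed char) encodes as "^c" so it remains
// distinguishable from a C string.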
8132 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8133 S += '*'; 8134 return; 8135 } 8136 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8137 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8138 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8139 S += '#'; 8140 return; 8141 } 8142 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8143 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8144 S += '@'; 8145 return; 8146 } 8147 // If the encoded string for the class includes template names, just emit 8148 // "^v" for pointers to the class. 8149 if (getLangOpts().CPlusPlus && 8150 (!getLangOpts().EncodeCXXClassTemplateSpec && 8151 hasTemplateSpecializationInEncodedString( 8152 RTy, Options.ExpandPointedToStructures()))) { 8153 S += "^v"; 8154 return; 8155 } 8156 // fall through... 8157 } 8158 S += '^'; 8159 getLegacyIntegralTypeEncoding(PointeeTy); 8160 8161 ObjCEncOptions NewOptions; 8162 if (Options.ExpandPointedToStructures()) 8163 NewOptions.setExpandStructures(); 8164 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8165 /*Field=*/nullptr, NotEncodedT); 8166 return; 8167 } 8168 8169 case Type::ConstantArray: 8170 case Type::IncompleteArray: 8171 case Type::VariableArray: { 8172 const auto *AT = cast<ArrayType>(CT); 8173 8174 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8175 // Incomplete arrays are encoded as a pointer to the array element. 8176 S += '^'; 8177 8178 getObjCEncodingForTypeImpl( 8179 AT->getElementType(), S, 8180 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8181 } else { 8182 S += '['; 8183 8184 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8185 S += llvm::utostr(CAT->getSize().getZExtValue()); 8186 else { 8187 //Variable length arrays are encoded as a regular array with 0 elements. 8188 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8189 "Unknown array type!"); 8190 S += '0'; 8191 } 8192 8193 getObjCEncodingForTypeImpl( 8194 AT->getElementType(), S, 8195 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8196 NotEncodedT); 8197 S += ']'; 8198 } 8199 return; 8200 } 8201 8202 case Type::FunctionNoProto: 8203 case Type::FunctionProto: 8204 S += '?'; 8205 return; 8206 8207 case Type::Record: { 8208 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8209 S += RDecl->isUnion() ? '(' : '{'; 8210 // Anonymous structures print as '?' 8211 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8212 S += II->getName(); 8213 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8214 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8215 llvm::raw_string_ostream OS(S); 8216 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8217 getPrintingPolicy()); 8218 } 8219 } else { 8220 S += '?'; 8221 } 8222 if (Options.ExpandStructures()) { 8223 S += '='; 8224 if (!RDecl->isUnion()) { 8225 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8226 } else { 8227 for (const auto *Field : RDecl->fields()) { 8228 if (FD) { 8229 S += '"'; 8230 S += Field->getNameAsString(); 8231 S += '"'; 8232 } 8233 8234 // Special case bit-fields. 
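// Illustrative example: a member 'unsigned flags : 2' contributes "b2" on
// the NeXT runtime; the GNU runtime form additionally carries the bit
// offset and underlying type (see EncodeBitField above).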
8235 if (Field->isBitField()) { 8236 getObjCEncodingForTypeImpl(Field->getType(), S, 8237 ObjCEncOptions().setExpandStructures(), 8238 Field); 8239 } else { 8240 QualType qt = Field->getType(); 8241 getLegacyIntegralTypeEncoding(qt); 8242 getObjCEncodingForTypeImpl( 8243 qt, S, 8244 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8245 NotEncodedT); 8246 } 8247 } 8248 } 8249 } 8250 S += RDecl->isUnion() ? ')' : '}'; 8251 return; 8252 } 8253 8254 case Type::BlockPointer: { 8255 const auto *BT = T->castAs<BlockPointerType>(); 8256 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8257 if (Options.EncodeBlockParameters()) { 8258 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8259 8260 S += '<'; 8261 // Block return type 8262 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8263 Options.forComponentType(), FD, NotEncodedT); 8264 // Block self 8265 S += "@?"; 8266 // Block parameters 8267 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8268 for (const auto &I : FPT->param_types()) 8269 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8270 NotEncodedT); 8271 } 8272 S += '>'; 8273 } 8274 return; 8275 } 8276 8277 case Type::ObjCObject: { 8278 // hack to match legacy encoding of *id and *Class 8279 QualType Ty = getObjCObjectPointerType(CT); 8280 if (Ty->isObjCIdType()) { 8281 S += "{objc_object=}"; 8282 return; 8283 } 8284 else if (Ty->isObjCClassType()) { 8285 S += "{objc_class=}"; 8286 return; 8287 } 8288 // TODO: Double check to make sure this intentionally falls through. 8289 LLVM_FALLTHROUGH; 8290 } 8291 8292 case Type::ObjCInterface: { 8293 // Ignore protocol qualifiers when mangling at this level. 8294 // @encode(class_name) 8295 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8296 S += '{'; 8297 S += OI->getObjCRuntimeNameAsString(); 8298 if (Options.ExpandStructures()) { 8299 S += '='; 8300 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8301 DeepCollectObjCIvars(OI, true, Ivars); 8302 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8303 const FieldDecl *Field = Ivars[i]; 8304 if (Field->isBitField()) 8305 getObjCEncodingForTypeImpl(Field->getType(), S, 8306 ObjCEncOptions().setExpandStructures(), 8307 Field); 8308 else 8309 getObjCEncodingForTypeImpl(Field->getType(), S, 8310 ObjCEncOptions().setExpandStructures(), FD, 8311 NotEncodedT); 8312 } 8313 } 8314 S += '}'; 8315 return; 8316 } 8317 8318 case Type::ObjCObjectPointer: { 8319 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8320 if (OPT->isObjCIdType()) { 8321 S += '@'; 8322 return; 8323 } 8324 8325 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8326 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8327 // Since this is a binary compatibility issue, need to consult with 8328 // runtime folks. Fortunately, this is a *very* obscure construct. 8329 S += '#'; 8330 return; 8331 } 8332 8333 if (OPT->isObjCQualifiedIdType()) { 8334 getObjCEncodingForTypeImpl( 8335 getObjCIdType(), S, 8336 Options.keepingOnly(ObjCEncOptions() 8337 .setExpandPointedToStructures() 8338 .setExpandStructures()), 8339 FD); 8340 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8341 // Note that we do extended encoding of protocol qualifier list 8342 // Only when doing ivar or property encoding. 
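// Illustrative example: an ivar declared as 'id<NSCopying> obj' encodes as
// @"<NSCopying>", whereas a plain @encode of the same type is just "@".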
8343 S += '"'; 8344 for (const auto *I : OPT->quals()) { 8345 S += '<'; 8346 S += I->getObjCRuntimeNameAsString(); 8347 S += '>'; 8348 } 8349 S += '"'; 8350 } 8351 return; 8352 } 8353 8354 S += '@'; 8355 if (OPT->getInterfaceDecl() && 8356 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8357 S += '"'; 8358 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8359 for (const auto *I : OPT->quals()) { 8360 S += '<'; 8361 S += I->getObjCRuntimeNameAsString(); 8362 S += '>'; 8363 } 8364 S += '"'; 8365 } 8366 return; 8367 } 8368 8369 // gcc just blithely ignores member pointers. 8370 // FIXME: we should do better than that. 'M' is available. 8371 case Type::MemberPointer: 8372 // This matches gcc's encoding, even though technically it is insufficient. 8373 //FIXME. We should do a better job than gcc. 8374 case Type::Vector: 8375 case Type::ExtVector: 8376 // Until we have a coherent encoding of these three types, issue warning. 8377 if (NotEncodedT) 8378 *NotEncodedT = T; 8379 return; 8380 8381 case Type::ConstantMatrix: 8382 if (NotEncodedT) 8383 *NotEncodedT = T; 8384 return; 8385 8386 case Type::BitInt: 8387 if (NotEncodedT) 8388 *NotEncodedT = T; 8389 return; 8390 8391 // We could see an undeduced auto type here during error recovery. 8392 // Just ignore it. 8393 case Type::Auto: 8394 case Type::DeducedTemplateSpecialization: 8395 return; 8396 8397 case Type::Pipe: 8398 #define ABSTRACT_TYPE(KIND, BASE) 8399 #define TYPE(KIND, BASE) 8400 #define DEPENDENT_TYPE(KIND, BASE) \ 8401 case Type::KIND: 8402 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8403 case Type::KIND: 8404 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8405 case Type::KIND: 8406 #include "clang/AST/TypeNodes.inc" 8407 llvm_unreachable("@encode for dependent type!"); 8408 } 8409 llvm_unreachable("bad type kind!"); 8410 } 8411 8412 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8413 std::string &S, 8414 const FieldDecl *FD, 8415 bool includeVBases, 8416 QualType *NotEncodedT) const { 8417 assert(RDecl && "Expected non-null RecordDecl"); 8418 assert(!RDecl->isUnion() && "Should not be called for unions"); 8419 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8420 return; 8421 8422 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8423 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8424 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8425 8426 if (CXXRec) { 8427 for (const auto &BI : CXXRec->bases()) { 8428 if (!BI.isVirtual()) { 8429 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8430 if (base->isEmpty()) 8431 continue; 8432 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8433 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8434 std::make_pair(offs, base)); 8435 } 8436 } 8437 } 8438 8439 unsigned i = 0; 8440 for (FieldDecl *Field : RDecl->fields()) { 8441 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8442 continue; 8443 uint64_t offs = layout.getFieldOffset(i); 8444 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8445 std::make_pair(offs, Field)); 8446 ++i; 8447 } 8448 8449 if (CXXRec && includeVBases) { 8450 for (const auto &BI : CXXRec->vbases()) { 8451 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8452 if (base->isEmpty()) 8453 continue; 8454 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8455 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8456 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8457 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 8458 std::make_pair(offs, base)); 8459 } 8460 } 8461 8462 CharUnits size; 8463 if (CXXRec) { 8464 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 8465 } else { 8466 size = layout.getSize(); 8467 } 8468 8469 #ifndef NDEBUG 8470 uint64_t CurOffs = 0; 8471 #endif 8472 std::multimap<uint64_t, NamedDecl *>::iterator 8473 CurLayObj = FieldOrBaseOffsets.begin(); 8474 8475 if (CXXRec && CXXRec->isDynamicClass() && 8476 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8477 if (FD) { 8478 S += "\"_vptr$"; 8479 std::string recname = CXXRec->getNameAsString(); 8480 if (recname.empty()) recname = "?"; 8481 S += recname; 8482 S += '"'; 8483 } 8484 S += "^^?"; 8485 #ifndef NDEBUG 8486 CurOffs += getTypeSize(VoidPtrTy); 8487 #endif 8488 } 8489 8490 if (!RDecl->hasFlexibleArrayMember()) { 8491 // Mark the end of the structure. 8492 uint64_t offs = toBits(size); 8493 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8494 std::make_pair(offs, nullptr)); 8495 } 8496 8497 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8498 #ifndef NDEBUG 8499 assert(CurOffs <= CurLayObj->first); 8500 if (CurOffs < CurLayObj->first) { 8501 uint64_t padding = CurLayObj->first - CurOffs; 8502 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8503 // packing/alignment of members is different that normal, in which case 8504 // the encoding will be out-of-sync with the real layout. 8505 // If the runtime switches to just consider the size of types without 8506 // taking into account alignment, we could make padding explicit in the 8507 // encoding (e.g. using arrays of chars). The encoding strings would be 8508 // longer then though. 8509 CurOffs += padding; 8510 } 8511 #endif 8512 8513 NamedDecl *dcl = CurLayObj->second; 8514 if (!dcl) 8515 break; // reached end of structure. 8516 8517 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8518 // We expand the bases without their virtual bases since those are going 8519 // in the initial structure. Note that this differs from gcc which 8520 // expands virtual bases each time one is encountered in the hierarchy, 8521 // making the encoding type bigger than it really is. 
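// Illustrative sketch (hypothetical dynamic class): for
//   struct S { virtual void f(); int x; };
// @encode(S) comes out roughly as "{S=^^?i}" -- the vptr slot emitted above
// is spelled "^^?" (and labelled "_vptr$S" when a field decl is supplied),
// followed by the remaining fields.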
8522 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8523 NotEncodedT); 8524 assert(!base->isEmpty()); 8525 #ifndef NDEBUG 8526 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8527 #endif 8528 } else { 8529 const auto *field = cast<FieldDecl>(dcl); 8530 if (FD) { 8531 S += '"'; 8532 S += field->getNameAsString(); 8533 S += '"'; 8534 } 8535 8536 if (field->isBitField()) { 8537 EncodeBitField(this, S, field->getType(), field); 8538 #ifndef NDEBUG 8539 CurOffs += field->getBitWidthValue(*this); 8540 #endif 8541 } else { 8542 QualType qt = field->getType(); 8543 getLegacyIntegralTypeEncoding(qt); 8544 getObjCEncodingForTypeImpl( 8545 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8546 FD, NotEncodedT); 8547 #ifndef NDEBUG 8548 CurOffs += getTypeSize(field->getType()); 8549 #endif 8550 } 8551 } 8552 } 8553 } 8554 8555 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8556 std::string& S) const { 8557 if (QT & Decl::OBJC_TQ_In) 8558 S += 'n'; 8559 if (QT & Decl::OBJC_TQ_Inout) 8560 S += 'N'; 8561 if (QT & Decl::OBJC_TQ_Out) 8562 S += 'o'; 8563 if (QT & Decl::OBJC_TQ_Bycopy) 8564 S += 'O'; 8565 if (QT & Decl::OBJC_TQ_Byref) 8566 S += 'R'; 8567 if (QT & Decl::OBJC_TQ_Oneway) 8568 S += 'V'; 8569 } 8570 8571 TypedefDecl *ASTContext::getObjCIdDecl() const { 8572 if (!ObjCIdDecl) { 8573 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8574 T = getObjCObjectPointerType(T); 8575 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8576 } 8577 return ObjCIdDecl; 8578 } 8579 8580 TypedefDecl *ASTContext::getObjCSelDecl() const { 8581 if (!ObjCSelDecl) { 8582 QualType T = getPointerType(ObjCBuiltinSelTy); 8583 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8584 } 8585 return ObjCSelDecl; 8586 } 8587 8588 TypedefDecl *ASTContext::getObjCClassDecl() const { 8589 if (!ObjCClassDecl) { 8590 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8591 T = getObjCObjectPointerType(T); 8592 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8593 } 8594 return ObjCClassDecl; 8595 } 8596 8597 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8598 if (!ObjCProtocolClassDecl) { 8599 ObjCProtocolClassDecl 8600 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8601 SourceLocation(), 8602 &Idents.get("Protocol"), 8603 /*typeParamList=*/nullptr, 8604 /*PrevDecl=*/nullptr, 8605 SourceLocation(), true); 8606 } 8607 8608 return ObjCProtocolClassDecl; 8609 } 8610 8611 //===----------------------------------------------------------------------===// 8612 // __builtin_va_list Construction Functions 8613 //===----------------------------------------------------------------------===// 8614 8615 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8616 StringRef Name) { 8617 // typedef char* __builtin[_ms]_va_list; 8618 QualType T = Context->getPointerType(Context->CharTy); 8619 return Context->buildImplicitTypedef(T, Name); 8620 } 8621 8622 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8623 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8624 } 8625 8626 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8627 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8628 } 8629 8630 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8631 // typedef void* __builtin_va_list; 8632 QualType T = Context->getPointerType(Context->VoidTy); 8633 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8634 } 8635 8636 static TypedefDecl * 8637 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8638 // struct __va_list 8639 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8640 if (Context->getLangOpts().CPlusPlus) { 8641 // namespace std { struct __va_list { 8642 auto *NS = NamespaceDecl::Create( 8643 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8644 /*Inline*/ false, SourceLocation(), SourceLocation(), 8645 &Context->Idents.get("std"), 8646 /*PrevDecl*/ nullptr); 8647 NS->setImplicit(); 8648 VaListTagDecl->setDeclContext(NS); 8649 } 8650 8651 VaListTagDecl->startDefinition(); 8652 8653 const size_t NumFields = 5; 8654 QualType FieldTypes[NumFields]; 8655 const char *FieldNames[NumFields]; 8656 8657 // void *__stack; 8658 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8659 FieldNames[0] = "__stack"; 8660 8661 // void *__gr_top; 8662 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8663 FieldNames[1] = "__gr_top"; 8664 8665 // void *__vr_top; 8666 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8667 FieldNames[2] = "__vr_top"; 8668 8669 // int __gr_offs; 8670 FieldTypes[3] = Context->IntTy; 8671 FieldNames[3] = "__gr_offs"; 8672 8673 // int __vr_offs; 8674 FieldTypes[4] = Context->IntTy; 8675 FieldNames[4] = "__vr_offs"; 8676 8677 // Create fields 8678 for (unsigned i = 0; i < NumFields; ++i) { 8679 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8680 VaListTagDecl, 8681 SourceLocation(), 8682 SourceLocation(), 8683 &Context->Idents.get(FieldNames[i]), 8684 FieldTypes[i], /*TInfo=*/nullptr, 8685 /*BitWidth=*/nullptr, 8686 /*Mutable=*/false, 8687 ICIS_NoInit); 8688 Field->setAccess(AS_public); 8689 VaListTagDecl->addDecl(Field); 8690 } 8691 VaListTagDecl->completeDefinition(); 8692 Context->VaListTagDecl = VaListTagDecl; 8693 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8694 8695 // } __builtin_va_list; 8696 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8697 } 8698 8699 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8700 // typedef struct __va_list_tag { 8701 RecordDecl *VaListTagDecl; 8702 8703 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8704 VaListTagDecl->startDefinition(); 8705 8706 const size_t NumFields = 5; 8707 QualType FieldTypes[NumFields]; 8708 const char *FieldNames[NumFields]; 8709 8710 // unsigned char gpr; 8711 FieldTypes[0] = Context->UnsignedCharTy; 8712 FieldNames[0] = "gpr"; 8713 8714 // unsigned char fpr; 8715 FieldTypes[1] = Context->UnsignedCharTy; 8716 FieldNames[1] = "fpr"; 8717 8718 // unsigned short reserved; 8719 FieldTypes[2] = Context->UnsignedShortTy; 8720 FieldNames[2] = "reserved"; 8721 8722 // void* overflow_arg_area; 8723 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8724 FieldNames[3] = "overflow_arg_area"; 8725 8726 // void* reg_save_area; 8727 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8728 FieldNames[4] = "reg_save_area"; 8729 8730 // Create fields 8731 for (unsigned i = 0; i < NumFields; ++i) { 8732 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8733 SourceLocation(), 8734 SourceLocation(), 8735 &Context->Idents.get(FieldNames[i]), 8736 FieldTypes[i], /*TInfo=*/nullptr, 8737 /*BitWidth=*/nullptr, 8738 /*Mutable=*/false, 8739 ICIS_NoInit); 8740 Field->setAccess(AS_public); 8741 VaListTagDecl->addDecl(Field); 8742 } 8743 VaListTagDecl->completeDefinition(); 8744 
Context->VaListTagDecl = VaListTagDecl; 8745 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8746 8747 // } __va_list_tag; 8748 TypedefDecl *VaListTagTypedefDecl = 8749 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8750 8751 QualType VaListTagTypedefType = 8752 Context->getTypedefType(VaListTagTypedefDecl); 8753 8754 // typedef __va_list_tag __builtin_va_list[1]; 8755 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8756 QualType VaListTagArrayType 8757 = Context->getConstantArrayType(VaListTagTypedefType, 8758 Size, nullptr, ArrayType::Normal, 0); 8759 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8760 } 8761 8762 static TypedefDecl * 8763 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8764 // struct __va_list_tag { 8765 RecordDecl *VaListTagDecl; 8766 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8767 VaListTagDecl->startDefinition(); 8768 8769 const size_t NumFields = 4; 8770 QualType FieldTypes[NumFields]; 8771 const char *FieldNames[NumFields]; 8772 8773 // unsigned gp_offset; 8774 FieldTypes[0] = Context->UnsignedIntTy; 8775 FieldNames[0] = "gp_offset"; 8776 8777 // unsigned fp_offset; 8778 FieldTypes[1] = Context->UnsignedIntTy; 8779 FieldNames[1] = "fp_offset"; 8780 8781 // void* overflow_arg_area; 8782 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8783 FieldNames[2] = "overflow_arg_area"; 8784 8785 // void* reg_save_area; 8786 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8787 FieldNames[3] = "reg_save_area"; 8788 8789 // Create fields 8790 for (unsigned i = 0; i < NumFields; ++i) { 8791 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8792 VaListTagDecl, 8793 SourceLocation(), 8794 SourceLocation(), 8795 &Context->Idents.get(FieldNames[i]), 8796 FieldTypes[i], /*TInfo=*/nullptr, 8797 /*BitWidth=*/nullptr, 8798 /*Mutable=*/false, 8799 ICIS_NoInit); 8800 Field->setAccess(AS_public); 8801 VaListTagDecl->addDecl(Field); 8802 } 8803 VaListTagDecl->completeDefinition(); 8804 Context->VaListTagDecl = VaListTagDecl; 8805 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8806 8807 // }; 8808 8809 // typedef struct __va_list_tag __builtin_va_list[1]; 8810 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8811 QualType VaListTagArrayType = Context->getConstantArrayType( 8812 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8813 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8814 } 8815 8816 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8817 // typedef int __builtin_va_list[4]; 8818 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8819 QualType IntArrayType = Context->getConstantArrayType( 8820 Context->IntTy, Size, nullptr, ArrayType::Normal, 0); 8821 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8822 } 8823 8824 static TypedefDecl * 8825 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8826 // struct __va_list 8827 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8828 if (Context->getLangOpts().CPlusPlus) { 8829 // namespace std { struct __va_list { 8830 NamespaceDecl *NS; 8831 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8832 Context->getTranslationUnitDecl(), 8833 /*Inline*/false, SourceLocation(), 8834 SourceLocation(), &Context->Idents.get("std"), 8835 /*PrevDecl*/ nullptr); 8836 NS->setImplicit(); 8837 
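// Note (informal): placing the tag in namespace std follows the ARM C++
// ABI, which requires va_list to be the class std::__va_list so that its
// mangling (St9__va_list) is consistent across translation units.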
VaListDecl->setDeclContext(NS); 8838 } 8839 8840 VaListDecl->startDefinition(); 8841 8842 // void * __ap; 8843 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8844 VaListDecl, 8845 SourceLocation(), 8846 SourceLocation(), 8847 &Context->Idents.get("__ap"), 8848 Context->getPointerType(Context->VoidTy), 8849 /*TInfo=*/nullptr, 8850 /*BitWidth=*/nullptr, 8851 /*Mutable=*/false, 8852 ICIS_NoInit); 8853 Field->setAccess(AS_public); 8854 VaListDecl->addDecl(Field); 8855 8856 // }; 8857 VaListDecl->completeDefinition(); 8858 Context->VaListTagDecl = VaListDecl; 8859 8860 // typedef struct __va_list __builtin_va_list; 8861 QualType T = Context->getRecordType(VaListDecl); 8862 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8863 } 8864 8865 static TypedefDecl * 8866 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8867 // struct __va_list_tag { 8868 RecordDecl *VaListTagDecl; 8869 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8870 VaListTagDecl->startDefinition(); 8871 8872 const size_t NumFields = 4; 8873 QualType FieldTypes[NumFields]; 8874 const char *FieldNames[NumFields]; 8875 8876 // long __gpr; 8877 FieldTypes[0] = Context->LongTy; 8878 FieldNames[0] = "__gpr"; 8879 8880 // long __fpr; 8881 FieldTypes[1] = Context->LongTy; 8882 FieldNames[1] = "__fpr"; 8883 8884 // void *__overflow_arg_area; 8885 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8886 FieldNames[2] = "__overflow_arg_area"; 8887 8888 // void *__reg_save_area; 8889 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8890 FieldNames[3] = "__reg_save_area"; 8891 8892 // Create fields 8893 for (unsigned i = 0; i < NumFields; ++i) { 8894 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8895 VaListTagDecl, 8896 SourceLocation(), 8897 SourceLocation(), 8898 &Context->Idents.get(FieldNames[i]), 8899 FieldTypes[i], /*TInfo=*/nullptr, 8900 /*BitWidth=*/nullptr, 8901 /*Mutable=*/false, 8902 ICIS_NoInit); 8903 Field->setAccess(AS_public); 8904 VaListTagDecl->addDecl(Field); 8905 } 8906 VaListTagDecl->completeDefinition(); 8907 Context->VaListTagDecl = VaListTagDecl; 8908 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8909 8910 // }; 8911 8912 // typedef __va_list_tag __builtin_va_list[1]; 8913 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8914 QualType VaListTagArrayType = Context->getConstantArrayType( 8915 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8916 8917 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8918 } 8919 8920 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 8921 // typedef struct __va_list_tag { 8922 RecordDecl *VaListTagDecl; 8923 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8924 VaListTagDecl->startDefinition(); 8925 8926 const size_t NumFields = 3; 8927 QualType FieldTypes[NumFields]; 8928 const char *FieldNames[NumFields]; 8929 8930 // void *CurrentSavedRegisterArea; 8931 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8932 FieldNames[0] = "__current_saved_reg_area_pointer"; 8933 8934 // void *SavedRegAreaEnd; 8935 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8936 FieldNames[1] = "__saved_reg_area_end_pointer"; 8937 8938 // void *OverflowArea; 8939 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8940 FieldNames[2] = "__overflow_area_pointer"; 8941 8942 // Create fields 8943 for (unsigned i = 0; i < NumFields; ++i) { 8944 FieldDecl *Field = FieldDecl::Create( 
8945 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 8946 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 8947 /*TInfo=*/nullptr, 8948 /*BitWidth=*/nullptr, 8949 /*Mutable=*/false, ICIS_NoInit); 8950 Field->setAccess(AS_public); 8951 VaListTagDecl->addDecl(Field); 8952 } 8953 VaListTagDecl->completeDefinition(); 8954 Context->VaListTagDecl = VaListTagDecl; 8955 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8956 8957 // } __va_list_tag; 8958 TypedefDecl *VaListTagTypedefDecl = 8959 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8960 8961 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 8962 8963 // typedef __va_list_tag __builtin_va_list[1]; 8964 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8965 QualType VaListTagArrayType = Context->getConstantArrayType( 8966 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); 8967 8968 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8969 } 8970 8971 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 8972 TargetInfo::BuiltinVaListKind Kind) { 8973 switch (Kind) { 8974 case TargetInfo::CharPtrBuiltinVaList: 8975 return CreateCharPtrBuiltinVaListDecl(Context); 8976 case TargetInfo::VoidPtrBuiltinVaList: 8977 return CreateVoidPtrBuiltinVaListDecl(Context); 8978 case TargetInfo::AArch64ABIBuiltinVaList: 8979 return CreateAArch64ABIBuiltinVaListDecl(Context); 8980 case TargetInfo::PowerABIBuiltinVaList: 8981 return CreatePowerABIBuiltinVaListDecl(Context); 8982 case TargetInfo::X86_64ABIBuiltinVaList: 8983 return CreateX86_64ABIBuiltinVaListDecl(Context); 8984 case TargetInfo::PNaClABIBuiltinVaList: 8985 return CreatePNaClABIBuiltinVaListDecl(Context); 8986 case TargetInfo::AAPCSABIBuiltinVaList: 8987 return CreateAAPCSABIBuiltinVaListDecl(Context); 8988 case TargetInfo::SystemZBuiltinVaList: 8989 return CreateSystemZBuiltinVaListDecl(Context); 8990 case TargetInfo::HexagonBuiltinVaList: 8991 return CreateHexagonBuiltinVaListDecl(Context); 8992 } 8993 8994 llvm_unreachable("Unhandled __builtin_va_list type kind"); 8995 } 8996 8997 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 8998 if (!BuiltinVaListDecl) { 8999 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 9000 assert(BuiltinVaListDecl->isImplicit()); 9001 } 9002 9003 return BuiltinVaListDecl; 9004 } 9005 9006 Decl *ASTContext::getVaListTagDecl() const { 9007 // Force the creation of VaListTagDecl by building the __builtin_va_list 9008 // declaration. 9009 if (!VaListTagDecl) 9010 (void)getBuiltinVaListDecl(); 9011 9012 return VaListTagDecl; 9013 } 9014 9015 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 9016 if (!BuiltinMSVaListDecl) 9017 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 9018 9019 return BuiltinMSVaListDecl; 9020 } 9021 9022 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 9023 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 9024 } 9025 9026 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 9027 assert(ObjCConstantStringType.isNull() && 9028 "'NSConstantString' type already set!"); 9029 9030 ObjCConstantStringType = getObjCInterfaceType(Decl); 9031 } 9032 9033 /// Retrieve the template name that corresponds to a non-empty 9034 /// lookup. 
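/// For example (illustrative), when unqualified lookup of a name such as
/// 'swap' finds several function templates, the resulting TemplateName
/// stores the entire overload set so resolution can happen once the
/// template arguments are known.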
9035 TemplateName 9036 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 9037 UnresolvedSetIterator End) const { 9038 unsigned size = End - Begin; 9039 assert(size > 1 && "set is not overloaded!"); 9040 9041 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 9042 size * sizeof(FunctionTemplateDecl*)); 9043 auto *OT = new (memory) OverloadedTemplateStorage(size); 9044 9045 NamedDecl **Storage = OT->getStorage(); 9046 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 9047 NamedDecl *D = *I; 9048 assert(isa<FunctionTemplateDecl>(D) || 9049 isa<UnresolvedUsingValueDecl>(D) || 9050 (isa<UsingShadowDecl>(D) && 9051 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 9052 *Storage++ = D; 9053 } 9054 9055 return TemplateName(OT); 9056 } 9057 9058 /// Retrieve a template name representing an unqualified-id that has been 9059 /// assumed to name a template for ADL purposes. 9060 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 9061 auto *OT = new (*this) AssumedTemplateStorage(Name); 9062 return TemplateName(OT); 9063 } 9064 9065 /// Retrieve the template name that represents a qualified 9066 /// template name such as \c std::vector. 9067 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 9068 bool TemplateKeyword, 9069 TemplateName Template) const { 9070 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9071 9072 // FIXME: Canonicalization? 9073 llvm::FoldingSetNodeID ID; 9074 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9075 9076 void *InsertPos = nullptr; 9077 QualifiedTemplateName *QTN = 9078 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9079 if (!QTN) { 9080 QTN = new (*this, alignof(QualifiedTemplateName)) 9081 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9082 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9083 } 9084 9085 return TemplateName(QTN); 9086 } 9087 9088 /// Retrieve the template name that represents a dependent 9089 /// template name such as \c MetaFun::template apply. 9090 TemplateName 9091 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9092 const IdentifierInfo *Name) const { 9093 assert((!NNS || NNS->isDependent()) && 9094 "Nested name specifier must be dependent"); 9095 9096 llvm::FoldingSetNodeID ID; 9097 DependentTemplateName::Profile(ID, NNS, Name); 9098 9099 void *InsertPos = nullptr; 9100 DependentTemplateName *QTN = 9101 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9102 9103 if (QTN) 9104 return TemplateName(QTN); 9105 9106 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9107 if (CanonNNS == NNS) { 9108 QTN = new (*this, alignof(DependentTemplateName)) 9109 DependentTemplateName(NNS, Name); 9110 } else { 9111 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9112 QTN = new (*this, alignof(DependentTemplateName)) 9113 DependentTemplateName(NNS, Name, Canon); 9114 DependentTemplateName *CheckQTN = 9115 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9116 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9117 (void)CheckQTN; 9118 } 9119 9120 DependentTemplateNames.InsertNode(QTN, InsertPos); 9121 return TemplateName(QTN); 9122 } 9123 9124 /// Retrieve the template name that represents a dependent 9125 /// template name such as \c MetaFun::template operator+. 
9126 TemplateName 9127 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9128 OverloadedOperatorKind Operator) const { 9129 assert((!NNS || NNS->isDependent()) && 9130 "Nested name specifier must be dependent"); 9131 9132 llvm::FoldingSetNodeID ID; 9133 DependentTemplateName::Profile(ID, NNS, Operator); 9134 9135 void *InsertPos = nullptr; 9136 DependentTemplateName *QTN 9137 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9138 9139 if (QTN) 9140 return TemplateName(QTN); 9141 9142 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9143 if (CanonNNS == NNS) { 9144 QTN = new (*this, alignof(DependentTemplateName)) 9145 DependentTemplateName(NNS, Operator); 9146 } else { 9147 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9148 QTN = new (*this, alignof(DependentTemplateName)) 9149 DependentTemplateName(NNS, Operator, Canon); 9150 9151 DependentTemplateName *CheckQTN 9152 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9153 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9154 (void)CheckQTN; 9155 } 9156 9157 DependentTemplateNames.InsertNode(QTN, InsertPos); 9158 return TemplateName(QTN); 9159 } 9160 9161 TemplateName 9162 ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, 9163 TemplateName replacement) const { 9164 llvm::FoldingSetNodeID ID; 9165 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); 9166 9167 void *insertPos = nullptr; 9168 SubstTemplateTemplateParmStorage *subst 9169 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9170 9171 if (!subst) { 9172 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); 9173 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9174 } 9175 9176 return TemplateName(subst); 9177 } 9178 9179 TemplateName 9180 ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, 9181 const TemplateArgument &ArgPack) const { 9182 auto &Self = const_cast<ASTContext &>(*this); 9183 llvm::FoldingSetNodeID ID; 9184 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); 9185 9186 void *InsertPos = nullptr; 9187 SubstTemplateTemplateParmPackStorage *Subst 9188 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 9189 9190 if (!Subst) { 9191 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, 9192 ArgPack.pack_size(), 9193 ArgPack.pack_begin()); 9194 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 9195 } 9196 9197 return TemplateName(Subst); 9198 } 9199 9200 /// getFromTargetType - Given one of the integer types provided by 9201 /// TargetInfo, produce the corresponding type. The unsigned @p Type 9202 /// is actually a value of type @c TargetInfo::IntType. 
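/// For example (illustrative), getFromTargetType(TargetInfo::UnsignedLong)
/// yields UnsignedLongTy, and TargetInfo::NoInt maps to a null CanQualType.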
9203 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 9204 switch (Type) { 9205 case TargetInfo::NoInt: return {}; 9206 case TargetInfo::SignedChar: return SignedCharTy; 9207 case TargetInfo::UnsignedChar: return UnsignedCharTy; 9208 case TargetInfo::SignedShort: return ShortTy; 9209 case TargetInfo::UnsignedShort: return UnsignedShortTy; 9210 case TargetInfo::SignedInt: return IntTy; 9211 case TargetInfo::UnsignedInt: return UnsignedIntTy; 9212 case TargetInfo::SignedLong: return LongTy; 9213 case TargetInfo::UnsignedLong: return UnsignedLongTy; 9214 case TargetInfo::SignedLongLong: return LongLongTy; 9215 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 9216 } 9217 9218 llvm_unreachable("Unhandled TargetInfo::IntType value"); 9219 } 9220 9221 //===----------------------------------------------------------------------===// 9222 // Type Predicates. 9223 //===----------------------------------------------------------------------===// 9224 9225 /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's 9226 /// garbage collection attribute. 9227 /// 9228 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 9229 if (getLangOpts().getGC() == LangOptions::NonGC) 9230 return Qualifiers::GCNone; 9231 9232 assert(getLangOpts().ObjC); 9233 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 9234 9235 // Default behaviour under objective-C's gc is for ObjC pointers 9236 // (or pointers to them) be treated as though they were declared 9237 // as __strong. 9238 if (GCAttrs == Qualifiers::GCNone) { 9239 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 9240 return Qualifiers::Strong; 9241 else if (Ty->isPointerType()) 9242 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 9243 } else { 9244 // It's not valid to set GC attributes on anything that isn't a 9245 // pointer. 9246 #ifndef NDEBUG 9247 QualType CT = Ty->getCanonicalTypeInternal(); 9248 while (const auto *AT = dyn_cast<ArrayType>(CT)) 9249 CT = AT->getElementType(); 9250 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 9251 #endif 9252 } 9253 return GCAttrs; 9254 } 9255 9256 //===----------------------------------------------------------------------===// 9257 // Type Compatibility Testing 9258 //===----------------------------------------------------------------------===// 9259 9260 /// areCompatVectorTypes - Return true if the two specified vector types are 9261 /// compatible. 9262 static bool areCompatVectorTypes(const VectorType *LHS, 9263 const VectorType *RHS) { 9264 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9265 return LHS->getElementType() == RHS->getElementType() && 9266 LHS->getNumElements() == RHS->getNumElements(); 9267 } 9268 9269 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9270 /// compatible. 
9271 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9272 const ConstantMatrixType *RHS) { 9273 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9274 return LHS->getElementType() == RHS->getElementType() && 9275 LHS->getNumRows() == RHS->getNumRows() && 9276 LHS->getNumColumns() == RHS->getNumColumns(); 9277 } 9278 9279 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9280 QualType SecondVec) { 9281 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9282 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9283 9284 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9285 return true; 9286 9287 // Treat Neon vector types and most AltiVec vector types as if they are the 9288 // equivalent GCC vector types. 9289 const auto *First = FirstVec->castAs<VectorType>(); 9290 const auto *Second = SecondVec->castAs<VectorType>(); 9291 if (First->getNumElements() == Second->getNumElements() && 9292 hasSameType(First->getElementType(), Second->getElementType()) && 9293 First->getVectorKind() != VectorType::AltiVecPixel && 9294 First->getVectorKind() != VectorType::AltiVecBool && 9295 Second->getVectorKind() != VectorType::AltiVecPixel && 9296 Second->getVectorKind() != VectorType::AltiVecBool && 9297 First->getVectorKind() != VectorType::SveFixedLengthDataVector && 9298 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 9299 Second->getVectorKind() != VectorType::SveFixedLengthDataVector && 9300 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) 9301 return true; 9302 9303 return false; 9304 } 9305 9306 /// getSVETypeSize - Return SVE vector or predicate register size. 9307 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9308 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); 9309 return Ty->getKind() == BuiltinType::SveBool 9310 ? (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth() 9311 : Context.getLangOpts().VScaleMin * 128; 9312 } 9313 9314 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9315 QualType SecondType) { 9316 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 9317 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 9318 "Expected SVE builtin type and vector type!"); 9319 9320 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9321 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9322 if (const auto *VT = SecondType->getAs<VectorType>()) { 9323 // Predicates have the same representation as uint8 so we also have to 9324 // check the kind to make these types incompatible. 
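// Illustrative example: only 'svbool_t' may convert to a fixed-length
// predicate vector; data types such as 'svuint8_t' are rejected here
// despite the shared i8 element representation.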
9325 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 9326 return BT->getKind() == BuiltinType::SveBool; 9327 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 9328 return VT->getElementType().getCanonicalType() == 9329 FirstType->getSveEltType(*this); 9330 else if (VT->getVectorKind() == VectorType::GenericVector) 9331 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9332 hasSameType(VT->getElementType(), 9333 getBuiltinVectorTypeInfo(BT).ElementType); 9334 } 9335 } 9336 return false; 9337 }; 9338 9339 return IsValidCast(FirstType, SecondType) || 9340 IsValidCast(SecondType, FirstType); 9341 } 9342 9343 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9344 QualType SecondType) { 9345 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 9346 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 9347 "Expected SVE builtin type and vector type!"); 9348 9349 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9350 const auto *BT = FirstType->getAs<BuiltinType>(); 9351 if (!BT) 9352 return false; 9353 9354 const auto *VecTy = SecondType->getAs<VectorType>(); 9355 if (VecTy && 9356 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || 9357 VecTy->getVectorKind() == VectorType::GenericVector)) { 9358 const LangOptions::LaxVectorConversionKind LVCKind = 9359 getLangOpts().getLaxVectorConversions(); 9360 9361 // Can not convert between sve predicates and sve vectors because of 9362 // different size. 9363 if (BT->getKind() == BuiltinType::SveBool && 9364 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) 9365 return false; 9366 9367 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9368 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9369 // converts to VLAT and VLAT implicitly converts to GNUT." 9370 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9371 // predicates. 9372 if (VecTy->getVectorKind() == VectorType::GenericVector && 9373 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9374 return false; 9375 9376 // If -flax-vector-conversions=all is specified, the types are 9377 // certainly compatible. 9378 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9379 return true; 9380 9381 // If -flax-vector-conversions=integer is specified, the types are 9382 // compatible if the elements are integer types. 9383 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9384 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9385 FirstType->getSveEltType(*this)->isIntegerType(); 9386 } 9387 9388 return false; 9389 }; 9390 9391 return IsLaxCompatible(FirstType, SecondType) || 9392 IsLaxCompatible(SecondType, FirstType); 9393 } 9394 9395 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9396 while (true) { 9397 // __strong id 9398 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9399 if (Attr->getAttrKind() == attr::ObjCOwnership) 9400 return true; 9401 9402 Ty = Attr->getModifiedType(); 9403 9404 // X *__strong (...) 9405 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9406 Ty = Paren->getInnerType(); 9407 9408 // We do not want to look through typedefs, typeof(expr), 9409 // typeof(type), or any other way that the type is somehow 9410 // abstracted. 
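// Illustrative example: '__strong id x;' carries a direct ownership
// qualifier, but 'typedef __strong id StrongId; StrongId x;' does not,
// because the qualifier is hidden behind the typedef.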
9411 } else { 9412 return false; 9413 } 9414 } 9415 } 9416 9417 //===----------------------------------------------------------------------===// 9418 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9419 //===----------------------------------------------------------------------===// 9420 9421 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9422 /// inheritance hierarchy of 'rProto'. 9423 bool 9424 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9425 ObjCProtocolDecl *rProto) const { 9426 if (declaresSameEntity(lProto, rProto)) 9427 return true; 9428 for (auto *PI : rProto->protocols()) 9429 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9430 return true; 9431 return false; 9432 } 9433 9434 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9435 /// Class<pr1, ...>. 9436 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9437 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9438 for (auto *lhsProto : lhs->quals()) { 9439 bool match = false; 9440 for (auto *rhsProto : rhs->quals()) { 9441 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9442 match = true; 9443 break; 9444 } 9445 } 9446 if (!match) 9447 return false; 9448 } 9449 return true; 9450 } 9451 9452 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9453 /// ObjCQualifiedIDType. 9454 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9455 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9456 bool compare) { 9457 // Allow id<P..> and an 'id' in all cases. 9458 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9459 return true; 9460 9461 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9462 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9463 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9464 return false; 9465 9466 if (lhs->isObjCQualifiedIdType()) { 9467 if (rhs->qual_empty()) { 9468 // If the RHS is a unqualified interface pointer "NSString*", 9469 // make sure we check the class hierarchy. 9470 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9471 for (auto *I : lhs->quals()) { 9472 // when comparing an id<P> on lhs with a static type on rhs, 9473 // see if static class implements all of id's protocols, directly or 9474 // through its super class and categories. 9475 if (!rhsID->ClassImplementsProtocol(I, true)) 9476 return false; 9477 } 9478 } 9479 // If there are no qualifiers and no interface, we have an 'id'. 9480 return true; 9481 } 9482 // Both the right and left sides have qualifiers. 9483 for (auto *lhsProto : lhs->quals()) { 9484 bool match = false; 9485 9486 // when comparing an id<P> on lhs with a static type on rhs, 9487 // see if static class implements all of id's protocols, directly or 9488 // through its super class and categories. 9489 for (auto *rhsProto : rhs->quals()) { 9490 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9491 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9492 match = true; 9493 break; 9494 } 9495 } 9496 // If the RHS is a qualified interface pointer "NSString<P>*", 9497 // make sure we check the class hierarchy. 9498 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9499 for (auto *I : lhs->quals()) { 9500 // when comparing an id<P> on lhs with a static type on rhs, 9501 // see if static class implements all of id's protocols, directly or 9502 // through its super class and categories. 
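// For example (illustrative): 'NSString<NSSecureCoding> *' converts to
// 'id<NSCopying>' because NSString itself adopts NSCopying, even though
// NSCopying is not named in the pointer's own qualifier list.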
9503 if (rhsID->ClassImplementsProtocol(I, true)) { 9504 match = true; 9505 break; 9506 } 9507 } 9508 } 9509 if (!match) 9510 return false; 9511 } 9512 9513 return true; 9514 } 9515 9516 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9517 9518 if (lhs->getInterfaceType()) { 9519 // If both the right and left sides have qualifiers. 9520 for (auto *lhsProto : lhs->quals()) { 9521 bool match = false; 9522 9523 // when comparing an id<P> on rhs with a static type on lhs, 9524 // see if static class implements all of id's protocols, directly or 9525 // through its super class and categories. 9526 // First, lhs protocols in the qualifier list must be found, direct 9527 // or indirect in rhs's qualifier list or it is a mismatch. 9528 for (auto *rhsProto : rhs->quals()) { 9529 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9530 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9531 match = true; 9532 break; 9533 } 9534 } 9535 if (!match) 9536 return false; 9537 } 9538 9539 // Static class's protocols, or its super class or category protocols 9540 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9541 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9542 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9543 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9544 // This is rather dubious but matches gcc's behavior. If lhs has 9545 // no type qualifier and its class has no static protocol(s) 9546 // assume that it is mismatch. 9547 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9548 return false; 9549 for (auto *lhsProto : LHSInheritedProtocols) { 9550 bool match = false; 9551 for (auto *rhsProto : rhs->quals()) { 9552 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9553 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9554 match = true; 9555 break; 9556 } 9557 } 9558 if (!match) 9559 return false; 9560 } 9561 } 9562 return true; 9563 } 9564 return false; 9565 } 9566 9567 /// canAssignObjCInterfaces - Return true if the two interface types are 9568 /// compatible for assignment from RHS to LHS. This handles validation of any 9569 /// protocol qualifiers on the LHS or RHS. 9570 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9571 const ObjCObjectPointerType *RHSOPT) { 9572 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9573 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9574 9575 // If either type represents the built-in 'id' type, return true. 9576 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9577 return true; 9578 9579 // Function object that propagates a successful result or handles 9580 // __kindof types. 9581 auto finish = [&](bool succeeded) -> bool { 9582 if (succeeded) 9583 return true; 9584 9585 if (!RHS->isKindOfType()) 9586 return false; 9587 9588 // Strip off __kindof and protocol qualifiers, then check whether 9589 // we can assign the other way. 9590 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9591 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9592 }; 9593 9594 // Casts from or to id<P> are allowed when the other side has compatible 9595 // protocols. 9596 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9597 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9598 } 9599 9600 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 
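// For example (illustrative): Class<NSCopying> can be initialized from a
// Class<NSCopying, NSCoding> value, since every protocol named on the LHS is
// covered on the RHS; the opposite direction is rejected.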
9601 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9602 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9603 } 9604 9605 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9606 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9607 return true; 9608 } 9609 9610 // If we have 2 user-defined types, fall into that path. 9611 if (LHS->getInterface() && RHS->getInterface()) { 9612 return finish(canAssignObjCInterfaces(LHS, RHS)); 9613 } 9614 9615 return false; 9616 } 9617 9618 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9619 /// for providing type-safety for objective-c pointers used to pass/return 9620 /// arguments in block literals. When passed as arguments, passing 'A*' where 9621 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9622 /// not OK. For the return type, the opposite is not OK. 9623 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9624 const ObjCObjectPointerType *LHSOPT, 9625 const ObjCObjectPointerType *RHSOPT, 9626 bool BlockReturnType) { 9627 9628 // Function object that propagates a successful result or handles 9629 // __kindof types. 9630 auto finish = [&](bool succeeded) -> bool { 9631 if (succeeded) 9632 return true; 9633 9634 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9635 if (!Expected->isKindOfType()) 9636 return false; 9637 9638 // Strip off __kindof and protocol qualifiers, then check whether 9639 // we can assign the other way. 9640 return canAssignObjCInterfacesInBlockPointer( 9641 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9642 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9643 BlockReturnType); 9644 }; 9645 9646 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9647 return true; 9648 9649 if (LHSOPT->isObjCBuiltinType()) { 9650 return finish(RHSOPT->isObjCBuiltinType() || 9651 RHSOPT->isObjCQualifiedIdType()); 9652 } 9653 9654 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9655 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9656 // Use for block parameters previous type checking for compatibility. 9657 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9658 // Or corrected type checking as in non-compat mode. 9659 (!BlockReturnType && 9660 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9661 else 9662 return finish(ObjCQualifiedIdTypesAreCompatible( 9663 (BlockReturnType ? LHSOPT : RHSOPT), 9664 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9665 } 9666 9667 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9668 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9669 if (LHS && RHS) { // We have 2 user-defined types. 9670 if (LHS != RHS) { 9671 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9672 return finish(BlockReturnType); 9673 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9674 return finish(!BlockReturnType); 9675 } 9676 else 9677 return true; 9678 } 9679 return false; 9680 } 9681 9682 /// Comparison routine for Objective-C protocols to be used with 9683 /// llvm::array_pod_sort. 9684 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9685 ObjCProtocolDecl * const *rhs) { 9686 return (*lhs)->getName().compare((*rhs)->getName()); 9687 } 9688 9689 /// getIntersectionOfProtocols - This routine finds the intersection of set 9690 /// of protocols inherited from two distinct objective-c pointer objects with 9691 /// the given common base. 
9692 /// It is used to build composite qualifier list of the composite type of 9693 /// the conditional expression involving two objective-c pointer objects. 9694 static 9695 void getIntersectionOfProtocols(ASTContext &Context, 9696 const ObjCInterfaceDecl *CommonBase, 9697 const ObjCObjectPointerType *LHSOPT, 9698 const ObjCObjectPointerType *RHSOPT, 9699 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9700 9701 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9702 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9703 assert(LHS->getInterface() && "LHS must have an interface base"); 9704 assert(RHS->getInterface() && "RHS must have an interface base"); 9705 9706 // Add all of the protocols for the LHS. 9707 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9708 9709 // Start with the protocol qualifiers. 9710 for (auto proto : LHS->quals()) { 9711 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9712 } 9713 9714 // Also add the protocols associated with the LHS interface. 9715 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9716 9717 // Add all of the protocols for the RHS. 9718 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9719 9720 // Start with the protocol qualifiers. 9721 for (auto proto : RHS->quals()) { 9722 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9723 } 9724 9725 // Also add the protocols associated with the RHS interface. 9726 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9727 9728 // Compute the intersection of the collected protocol sets. 9729 for (auto proto : LHSProtocolSet) { 9730 if (RHSProtocolSet.count(proto)) 9731 IntersectionSet.push_back(proto); 9732 } 9733 9734 // Compute the set of protocols that is implied by either the common type or 9735 // the protocols within the intersection. 9736 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9737 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9738 9739 // Remove any implied protocols from the list of inherited protocols. 9740 if (!ImpliedProtocols.empty()) { 9741 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9742 return ImpliedProtocols.contains(proto); 9743 }); 9744 } 9745 9746 // Sort the remaining protocols by name. 9747 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9748 compareObjCProtocolsByName); 9749 } 9750 9751 /// Determine whether the first type is a subtype of the second. 9752 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9753 QualType rhs) { 9754 // Common case: two object pointers. 9755 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9756 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9757 if (lhsOPT && rhsOPT) 9758 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9759 9760 // Two block pointers. 9761 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9762 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9763 if (lhsBlock && rhsBlock) 9764 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9765 9766 // If either is an unqualified 'id' and the other is a block, it's 9767 // acceptable. 9768 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9769 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9770 return true; 9771 9772 return false; 9773 } 9774 9775 // Check that the given Objective-C type argument lists are equivalent. 
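// Mismatched arguments may still be acceptable depending on the variance of
// the corresponding type parameter. For example (illustrative), with
// NSArray's __covariant ObjectType parameter, an NSString * argument on the
// left-hand side matches an NSMutableString * argument on the right.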
9776 static bool sameObjCTypeArgs(ASTContext &ctx, 9777 const ObjCInterfaceDecl *iface, 9778 ArrayRef<QualType> lhsArgs, 9779 ArrayRef<QualType> rhsArgs, 9780 bool stripKindOf) { 9781 if (lhsArgs.size() != rhsArgs.size()) 9782 return false; 9783 9784 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9785 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9786 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9787 continue; 9788 9789 switch (typeParams->begin()[i]->getVariance()) { 9790 case ObjCTypeParamVariance::Invariant: 9791 if (!stripKindOf || 9792 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9793 rhsArgs[i].stripObjCKindOfType(ctx))) { 9794 return false; 9795 } 9796 break; 9797 9798 case ObjCTypeParamVariance::Covariant: 9799 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9800 return false; 9801 break; 9802 9803 case ObjCTypeParamVariance::Contravariant: 9804 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9805 return false; 9806 break; 9807 } 9808 } 9809 9810 return true; 9811 } 9812 9813 QualType ASTContext::areCommonBaseCompatible( 9814 const ObjCObjectPointerType *Lptr, 9815 const ObjCObjectPointerType *Rptr) { 9816 const ObjCObjectType *LHS = Lptr->getObjectType(); 9817 const ObjCObjectType *RHS = Rptr->getObjectType(); 9818 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 9819 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 9820 9821 if (!LDecl || !RDecl) 9822 return {}; 9823 9824 // When either LHS or RHS is a kindof type, we should return a kindof type. 9825 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 9826 // kindof(A). 9827 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 9828 9829 // Follow the left-hand side up the class hierarchy until we either hit a 9830 // root or find the RHS. Record the ancestors in case we don't find it. 9831 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 9832 LHSAncestors; 9833 while (true) { 9834 // Record this ancestor. We'll need this if the common type isn't in the 9835 // path from the LHS to the root. 9836 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 9837 9838 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 9839 // Get the type arguments. 9840 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 9841 bool anyChanges = false; 9842 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9843 // Both have type arguments, compare them. 9844 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9845 LHS->getTypeArgs(), RHS->getTypeArgs(), 9846 /*stripKindOf=*/true)) 9847 return {}; 9848 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9849 // If only one has type arguments, the result will not have type 9850 // arguments. 9851 LHSTypeArgs = {}; 9852 anyChanges = true; 9853 } 9854 9855 // Compute the intersection of protocols. 9856 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9857 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 9858 Protocols); 9859 if (!Protocols.empty()) 9860 anyChanges = true; 9861 9862 // If anything in the LHS will have changed, build a new result type. 9863 // If we need to return a kindof type but LHS is not a kindof type, we 9864 // build a new result type. 
9865 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 9866 QualType Result = getObjCInterfaceType(LHS->getInterface()); 9867 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 9868 anyKindOf || LHS->isKindOfType()); 9869 return getObjCObjectPointerType(Result); 9870 } 9871 9872 return getObjCObjectPointerType(QualType(LHS, 0)); 9873 } 9874 9875 // Find the superclass. 9876 QualType LHSSuperType = LHS->getSuperClassType(); 9877 if (LHSSuperType.isNull()) 9878 break; 9879 9880 LHS = LHSSuperType->castAs<ObjCObjectType>(); 9881 } 9882 9883 // We didn't find anything by following the LHS to its root; now check 9884 // the RHS against the cached set of ancestors. 9885 while (true) { 9886 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 9887 if (KnownLHS != LHSAncestors.end()) { 9888 LHS = KnownLHS->second; 9889 9890 // Get the type arguments. 9891 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 9892 bool anyChanges = false; 9893 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9894 // Both have type arguments, compare them. 9895 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9896 LHS->getTypeArgs(), RHS->getTypeArgs(), 9897 /*stripKindOf=*/true)) 9898 return {}; 9899 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9900 // If only one has type arguments, the result will not have type 9901 // arguments. 9902 RHSTypeArgs = {}; 9903 anyChanges = true; 9904 } 9905 9906 // Compute the intersection of protocols. 9907 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9908 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 9909 Protocols); 9910 if (!Protocols.empty()) 9911 anyChanges = true; 9912 9913 // If we need to return a kindof type but RHS is not a kindof type, we 9914 // build a new result type. 9915 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 9916 QualType Result = getObjCInterfaceType(RHS->getInterface()); 9917 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 9918 anyKindOf || RHS->isKindOfType()); 9919 return getObjCObjectPointerType(Result); 9920 } 9921 9922 return getObjCObjectPointerType(QualType(RHS, 0)); 9923 } 9924 9925 // Find the superclass of the RHS. 9926 QualType RHSSuperType = RHS->getSuperClassType(); 9927 if (RHSSuperType.isNull()) 9928 break; 9929 9930 RHS = RHSSuperType->castAs<ObjCObjectType>(); 9931 } 9932 9933 return {}; 9934 } 9935 9936 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 9937 const ObjCObjectType *RHS) { 9938 assert(LHS->getInterface() && "LHS is not an interface type"); 9939 assert(RHS->getInterface() && "RHS is not an interface type"); 9940 9941 // Verify that the base decls are compatible: the RHS must be a subclass of 9942 // the LHS. 9943 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 9944 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 9945 if (!IsSuperClass) 9946 return false; 9947 9948 // If the LHS has protocol qualifiers, determine whether all of them are 9949 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 9950 // LHS). 9951 if (LHS->getNumProtocols() > 0) { 9952 // OK if conversion of LHS to SuperClass results in narrowing of types 9953 // ; i.e., SuperClass may implement at least one of the protocols 9954 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 9955 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 
9956 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols; 9957 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); 9958 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's 9959 // qualifiers. 9960 for (auto *RHSPI : RHS->quals()) 9961 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols); 9962 // If there are no protocols associated with RHS, it is not a match. 9963 if (SuperClassInheritedProtocols.empty()) 9964 return false; 9965 9966 for (const auto *LHSProto : LHS->quals()) { 9967 bool SuperImplementsProtocol = false; 9968 for (auto *SuperClassProto : SuperClassInheritedProtocols) 9969 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { 9970 SuperImplementsProtocol = true; 9971 break; 9972 } 9973 if (!SuperImplementsProtocol) 9974 return false; 9975 } 9976 } 9977 9978 // If the LHS is specialized, we may need to check type arguments. 9979 if (LHS->isSpecialized()) { 9980 // Follow the superclass chain until we've matched the LHS class in the 9981 // hierarchy. This substitutes type arguments through. 9982 const ObjCObjectType *RHSSuper = RHS; 9983 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface)) 9984 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>(); 9985 9986 // If the RHS is specialized, compare type arguments. 9987 if (RHSSuper->isSpecialized() && 9988 !sameObjCTypeArgs(*this, LHS->getInterface(), 9989 LHS->getTypeArgs(), RHSSuper->getTypeArgs(), 9990 /*stripKindOf=*/true)) { 9991 return false; 9992 } 9993 } 9994 9995 return true; 9996 } 9997 9998 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { 9999 // Get the "pointed to" types. 10000 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); 10001 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); 10002 10003 if (!LHSOPT || !RHSOPT) 10004 return false; 10005 10006 return canAssignObjCInterfaces(LHSOPT, RHSOPT) || 10007 canAssignObjCInterfaces(RHSOPT, LHSOPT); 10008 } 10009 10010 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { 10011 return canAssignObjCInterfaces( 10012 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(), 10013 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>()); 10014 } 10015 10016 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, 10017 /// both shall have the identically qualified version of a compatible type. 10018 /// C99 6.2.7p1: Two types have compatible types if their types are the 10019 /// same. See 6.7.[2,3,5] for additional rules.
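/// For example (illustrative, C mode): 'int' and 'unsigned int' are not
/// compatible even though they have the same width, while 'int (*)(void)'
/// and 'int (*)()' are, because an unprototyped function type is compatible
/// with a prototype whose parameter types are unaffected by the default
/// argument promotions.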
10020 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 10021 bool CompareUnqualified) { 10022 if (getLangOpts().CPlusPlus) 10023 return hasSameType(LHS, RHS); 10024 10025 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 10026 } 10027 10028 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 10029 return typesAreCompatible(LHS, RHS); 10030 } 10031 10032 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 10033 return !mergeTypes(LHS, RHS, true).isNull(); 10034 } 10035 10036 /// mergeTransparentUnionType - if T is a transparent union type and a member 10037 /// of T is compatible with SubType, return the merged type, else return 10038 /// QualType() 10039 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 10040 bool OfBlockPointer, 10041 bool Unqualified) { 10042 if (const RecordType *UT = T->getAsUnionType()) { 10043 RecordDecl *UD = UT->getDecl(); 10044 if (UD->hasAttr<TransparentUnionAttr>()) { 10045 for (const auto *I : UD->fields()) { 10046 QualType ET = I->getType().getUnqualifiedType(); 10047 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 10048 if (!MT.isNull()) 10049 return MT; 10050 } 10051 } 10052 } 10053 10054 return {}; 10055 } 10056 10057 /// mergeFunctionParameterTypes - merge two types which appear as function 10058 /// parameter types 10059 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 10060 bool OfBlockPointer, 10061 bool Unqualified) { 10062 // GNU extension: two types are compatible if they appear as a function 10063 // argument, one of the types is a transparent union type and the other 10064 // type is compatible with a union member 10065 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 10066 Unqualified); 10067 if (!lmerge.isNull()) 10068 return lmerge; 10069 10070 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10071 Unqualified); 10072 if (!rmerge.isNull()) 10073 return rmerge; 10074 10075 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10076 } 10077 10078 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10079 bool OfBlockPointer, bool Unqualified, 10080 bool AllowCXX) { 10081 const auto *lbase = lhs->castAs<FunctionType>(); 10082 const auto *rbase = rhs->castAs<FunctionType>(); 10083 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10084 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10085 bool allLTypes = true; 10086 bool allRTypes = true; 10087 10088 // Check return type 10089 QualType retType; 10090 if (OfBlockPointer) { 10091 QualType RHS = rbase->getReturnType(); 10092 QualType LHS = lbase->getReturnType(); 10093 bool UnqualifiedResult = Unqualified; 10094 if (!UnqualifiedResult) 10095 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10096 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10097 } 10098 else 10099 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10100 Unqualified); 10101 if (retType.isNull()) 10102 return {}; 10103 10104 if (Unqualified) 10105 retType = retType.getUnqualifiedType(); 10106 10107 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10108 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10109 if (Unqualified) { 10110 LRetType = LRetType.getUnqualifiedType(); 10111 RRetType = RRetType.getUnqualifiedType(); 10112 } 10113 10114 if (getCanonicalType(retType) != LRetType) 10115 allLTypes = false; 10116 if 
(getCanonicalType(retType) != RRetType) 10117 allRTypes = false; 10118 10119 // FIXME: double check this 10120 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10121 // rbase->getRegParmAttr() != 0 && 10122 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10123 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10124 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10125 10126 // Compatible functions must have compatible calling conventions 10127 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10128 return {}; 10129 10130 // Regparm is part of the calling convention. 10131 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10132 return {}; 10133 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10134 return {}; 10135 10136 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10137 return {}; 10138 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10139 return {}; 10140 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10141 return {}; 10142 10143 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 10144 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10145 10146 if (lbaseInfo.getNoReturn() != NoReturn) 10147 allLTypes = false; 10148 if (rbaseInfo.getNoReturn() != NoReturn) 10149 allRTypes = false; 10150 10151 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10152 10153 if (lproto && rproto) { // two C99 style function prototypes 10154 assert((AllowCXX || 10155 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10156 "C++ shouldn't be here"); 10157 // Compatible functions must have the same number of parameters 10158 if (lproto->getNumParams() != rproto->getNumParams()) 10159 return {}; 10160 10161 // Variadic and non-variadic functions aren't compatible 10162 if (lproto->isVariadic() != rproto->isVariadic()) 10163 return {}; 10164 10165 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10166 return {}; 10167 10168 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10169 bool canUseLeft, canUseRight; 10170 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10171 newParamInfos)) 10172 return {}; 10173 10174 if (!canUseLeft) 10175 allLTypes = false; 10176 if (!canUseRight) 10177 allRTypes = false; 10178 10179 // Check parameter type compatibility 10180 SmallVector<QualType, 10> types; 10181 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10182 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10183 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10184 QualType paramType = mergeFunctionParameterTypes( 10185 lParamType, rParamType, OfBlockPointer, Unqualified); 10186 if (paramType.isNull()) 10187 return {}; 10188 10189 if (Unqualified) 10190 paramType = paramType.getUnqualifiedType(); 10191 10192 types.push_back(paramType); 10193 if (Unqualified) { 10194 lParamType = lParamType.getUnqualifiedType(); 10195 rParamType = rParamType.getUnqualifiedType(); 10196 } 10197 10198 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10199 allLTypes = false; 10200 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10201 allRTypes = false; 10202 } 10203 10204 if (allLTypes) return lhs; 10205 if (allRTypes) return rhs; 10206 10207 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10208 EPI.ExtInfo = einfo; 10209 EPI.ExtParameterInfos = 10210 newParamInfos.empty() ? 
nullptr : newParamInfos.data(); 10211 return getFunctionType(retType, types, EPI); 10212 } 10213 10214 if (lproto) allRTypes = false; 10215 if (rproto) allLTypes = false; 10216 10217 const FunctionProtoType *proto = lproto ? lproto : rproto; 10218 if (proto) { 10219 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10220 if (proto->isVariadic()) 10221 return {}; 10222 // Check that the types are compatible with the types that 10223 // would result from default argument promotions (C99 6.7.5.3p15). 10224 // The only types actually affected are promotable integer 10225 // types and floats, which would be passed as a different 10226 // type depending on whether the prototype is visible. 10227 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10228 QualType paramTy = proto->getParamType(i); 10229 10230 // Look at the converted type of enum types, since that is the type used 10231 // to pass enum values. 10232 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10233 paramTy = Enum->getDecl()->getIntegerType(); 10234 if (paramTy.isNull()) 10235 return {}; 10236 } 10237 10238 if (paramTy->isPromotableIntegerType() || 10239 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10240 return {}; 10241 } 10242 10243 if (allLTypes) return lhs; 10244 if (allRTypes) return rhs; 10245 10246 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10247 EPI.ExtInfo = einfo; 10248 return getFunctionType(retType, proto->getParamTypes(), EPI); 10249 } 10250 10251 if (allLTypes) return lhs; 10252 if (allRTypes) return rhs; 10253 return getFunctionNoProtoType(retType, einfo); 10254 } 10255 10256 /// Given that we have an enum type and a non-enum type, try to merge them. 10257 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10258 QualType other, bool isBlockReturnType) { 10259 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10260 // a signed integer type, or an unsigned integer type. 10261 // Compatibility is based on the underlying type, not the promotion 10262 // type. 10263 QualType underlyingType = ET->getDecl()->getIntegerType(); 10264 if (underlyingType.isNull()) 10265 return {}; 10266 if (Context.hasSameType(underlyingType, other)) 10267 return other; 10268 10269 // In block return types, we're more permissive and accept any 10270 // integral type of the same size. 10271 if (isBlockReturnType && other->isIntegerType() && 10272 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10273 return other; 10274 10275 return {}; 10276 } 10277 10278 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, 10279 bool OfBlockPointer, 10280 bool Unqualified, bool BlockReturnType) { 10281 // For C++ we will not reach this code with reference types (see below), 10282 // for OpenMP variant call overloading we might. 10283 // 10284 // C++ [expr]: If an expression initially has the type "reference to T", the 10285 // type is adjusted to "T" prior to any further analysis, the expression 10286 // designates the object or function denoted by the reference, and the 10287 // expression is an lvalue unless the reference is an rvalue reference and 10288 // the expression is a function call (possibly inside parentheses). 
10289 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10290 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10291 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10292 LHS->getTypeClass() == RHS->getTypeClass()) 10293 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10294 OfBlockPointer, Unqualified, BlockReturnType); 10295 if (LHSRefTy || RHSRefTy) 10296 return {}; 10297 10298 if (Unqualified) { 10299 LHS = LHS.getUnqualifiedType(); 10300 RHS = RHS.getUnqualifiedType(); 10301 } 10302 10303 QualType LHSCan = getCanonicalType(LHS), 10304 RHSCan = getCanonicalType(RHS); 10305 10306 // If two types are identical, they are compatible. 10307 if (LHSCan == RHSCan) 10308 return LHS; 10309 10310 // If the qualifiers are different, the types aren't compatible... mostly. 10311 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10312 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10313 if (LQuals != RQuals) { 10314 // If any of these qualifiers are different, we have a type 10315 // mismatch. 10316 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10317 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10318 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10319 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10320 return {}; 10321 10322 // Exactly one GC qualifier difference is allowed: __strong is 10323 // okay if the other type has no GC qualifier but is an Objective 10324 // C object pointer (i.e. implicitly strong by default). We fix 10325 // this by pretending that the unqualified type was actually 10326 // qualified __strong. 10327 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10328 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10329 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10330 10331 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10332 return {}; 10333 10334 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10335 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10336 } 10337 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10338 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10339 } 10340 return {}; 10341 } 10342 10343 // Okay, qualifiers are equal. 10344 10345 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10346 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10347 10348 // We want to consider the two function types to be the same for these 10349 // comparisons, just force one to the other. 10350 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10351 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10352 10353 // Same as above for arrays 10354 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10355 LHSClass = Type::ConstantArray; 10356 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10357 RHSClass = Type::ConstantArray; 10358 10359 // ObjCInterfaces are just specialized ObjCObjects. 10360 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10361 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10362 10363 // Canonicalize ExtVector -> Vector. 10364 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10365 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10366 10367 // If the canonical type classes don't match. 10368 if (LHSClass != RHSClass) { 10369 // Note that we only have special rules for turning block enum 10370 // returns into block int returns, not vice-versa. 
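// For example (illustrative): an enum whose implementation-chosen underlying
// type is 'unsigned int' merges with 'unsigned int' (yielding the integer
// type), but not with 'int', since compatibility here is based on the
// underlying type rather than the promoted type.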
10371 if (const auto *ETy = LHS->getAs<EnumType>()) { 10372 return mergeEnumWithInteger(*this, ETy, RHS, false); 10373 } 10374 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10375 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10376 } 10377 // allow block pointer type to match an 'id' type. 10378 if (OfBlockPointer && !BlockReturnType) { 10379 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10380 return LHS; 10381 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10382 return RHS; 10383 } 10384 // Allow __auto_type to match anything; it merges to the type with more 10385 // information. 10386 if (const auto *AT = LHS->getAs<AutoType>()) { 10387 if (!AT->isDeduced() && AT->isGNUAutoType()) 10388 return RHS; 10389 } 10390 if (const auto *AT = RHS->getAs<AutoType>()) { 10391 if (!AT->isDeduced() && AT->isGNUAutoType()) 10392 return LHS; 10393 } 10394 return {}; 10395 } 10396 10397 // The canonical type classes match. 10398 switch (LHSClass) { 10399 #define TYPE(Class, Base) 10400 #define ABSTRACT_TYPE(Class, Base) 10401 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10402 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10403 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10404 #include "clang/AST/TypeNodes.inc" 10405 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10406 10407 case Type::Auto: 10408 case Type::DeducedTemplateSpecialization: 10409 case Type::LValueReference: 10410 case Type::RValueReference: 10411 case Type::MemberPointer: 10412 llvm_unreachable("C++ should never be in mergeTypes"); 10413 10414 case Type::ObjCInterface: 10415 case Type::IncompleteArray: 10416 case Type::VariableArray: 10417 case Type::FunctionProto: 10418 case Type::ExtVector: 10419 llvm_unreachable("Types are eliminated above"); 10420 10421 case Type::Pointer: 10422 { 10423 // Merge two pointer types, while trying to preserve typedef info 10424 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10425 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10426 if (Unqualified) { 10427 LHSPointee = LHSPointee.getUnqualifiedType(); 10428 RHSPointee = RHSPointee.getUnqualifiedType(); 10429 } 10430 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10431 Unqualified); 10432 if (ResultType.isNull()) 10433 return {}; 10434 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10435 return LHS; 10436 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10437 return RHS; 10438 return getPointerType(ResultType); 10439 } 10440 case Type::BlockPointer: 10441 { 10442 // Merge two block pointer types, while trying to preserve typedef info 10443 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10444 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10445 if (Unqualified) { 10446 LHSPointee = LHSPointee.getUnqualifiedType(); 10447 RHSPointee = RHSPointee.getUnqualifiedType(); 10448 } 10449 if (getLangOpts().OpenCL) { 10450 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10451 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10452 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10453 // 6.12.5) thus the following check is asymmetric. 
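// Illustrative example: a block pointer whose pointee is in the __generic
// address space can be merged with one whose pointee is in __global (generic
// is a superset), but not the other way around.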
10454 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10455 return {}; 10456 LHSPteeQual.removeAddressSpace(); 10457 RHSPteeQual.removeAddressSpace(); 10458 LHSPointee = 10459 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10460 RHSPointee = 10461 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10462 } 10463 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10464 Unqualified); 10465 if (ResultType.isNull()) 10466 return {}; 10467 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10468 return LHS; 10469 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10470 return RHS; 10471 return getBlockPointerType(ResultType); 10472 } 10473 case Type::Atomic: 10474 { 10475 // Merge two pointer types, while trying to preserve typedef info 10476 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10477 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10478 if (Unqualified) { 10479 LHSValue = LHSValue.getUnqualifiedType(); 10480 RHSValue = RHSValue.getUnqualifiedType(); 10481 } 10482 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10483 Unqualified); 10484 if (ResultType.isNull()) 10485 return {}; 10486 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10487 return LHS; 10488 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10489 return RHS; 10490 return getAtomicType(ResultType); 10491 } 10492 case Type::ConstantArray: 10493 { 10494 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10495 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10496 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10497 return {}; 10498 10499 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10500 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10501 if (Unqualified) { 10502 LHSElem = LHSElem.getUnqualifiedType(); 10503 RHSElem = RHSElem.getUnqualifiedType(); 10504 } 10505 10506 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10507 if (ResultType.isNull()) 10508 return {}; 10509 10510 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10511 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10512 10513 // If either side is a variable array, and both are complete, check whether 10514 // the current dimension is definite. 
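// For example (illustrative): merging 'int[10]' with 'int[n]' succeeds when
// 'n' folds to the integer constant 10, but fails when it folds to any other
// value.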
10515 if (LVAT || RVAT) { 10516 auto SizeFetch = [this](const VariableArrayType* VAT, 10517 const ConstantArrayType* CAT) 10518 -> std::pair<bool,llvm::APInt> { 10519 if (VAT) { 10520 Optional<llvm::APSInt> TheInt; 10521 Expr *E = VAT->getSizeExpr(); 10522 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10523 return std::make_pair(true, *TheInt); 10524 return std::make_pair(false, llvm::APSInt()); 10525 } 10526 if (CAT) 10527 return std::make_pair(true, CAT->getSize()); 10528 return std::make_pair(false, llvm::APInt()); 10529 }; 10530 10531 bool HaveLSize, HaveRSize; 10532 llvm::APInt LSize, RSize; 10533 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10534 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10535 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10536 return {}; // Definite, but unequal, array dimension 10537 } 10538 10539 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10540 return LHS; 10541 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10542 return RHS; 10543 if (LCAT) 10544 return getConstantArrayType(ResultType, LCAT->getSize(), 10545 LCAT->getSizeExpr(), 10546 ArrayType::ArraySizeModifier(), 0); 10547 if (RCAT) 10548 return getConstantArrayType(ResultType, RCAT->getSize(), 10549 RCAT->getSizeExpr(), 10550 ArrayType::ArraySizeModifier(), 0); 10551 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10552 return LHS; 10553 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10554 return RHS; 10555 if (LVAT) { 10556 // FIXME: This isn't correct! But tricky to implement because 10557 // the array's size has to be the size of LHS, but the type 10558 // has to be different. 10559 return LHS; 10560 } 10561 if (RVAT) { 10562 // FIXME: This isn't correct! But tricky to implement because 10563 // the array's size has to be the size of RHS, but the type 10564 // has to be different. 10565 return RHS; 10566 } 10567 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10568 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10569 return getIncompleteArrayType(ResultType, 10570 ArrayType::ArraySizeModifier(), 0); 10571 } 10572 case Type::FunctionNoProto: 10573 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); 10574 case Type::Record: 10575 case Type::Enum: 10576 return {}; 10577 case Type::Builtin: 10578 // Only exactly equal builtin types are compatible, which is tested above. 10579 return {}; 10580 case Type::Complex: 10581 // Distinct complex types are incompatible. 10582 return {}; 10583 case Type::Vector: 10584 // FIXME: The merged type should be an ExtVector! 10585 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10586 RHSCan->castAs<VectorType>())) 10587 return LHS; 10588 return {}; 10589 case Type::ConstantMatrix: 10590 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10591 RHSCan->castAs<ConstantMatrixType>())) 10592 return LHS; 10593 return {}; 10594 case Type::ObjCObject: { 10595 // Check if the types are assignment compatible. 10596 // FIXME: This should be type compatibility, e.g. whether 10597 // "LHS x; RHS x;" at global scope is legal. 
10598 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10599 RHS->castAs<ObjCObjectType>())) 10600 return LHS; 10601 return {}; 10602 } 10603 case Type::ObjCObjectPointer: 10604 if (OfBlockPointer) { 10605 if (canAssignObjCInterfacesInBlockPointer( 10606 LHS->castAs<ObjCObjectPointerType>(), 10607 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10608 return LHS; 10609 return {}; 10610 } 10611 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10612 RHS->castAs<ObjCObjectPointerType>())) 10613 return LHS; 10614 return {}; 10615 case Type::Pipe: 10616 assert(LHS != RHS && 10617 "Equivalent pipe types should have already been handled!"); 10618 return {}; 10619 case Type::BitInt: { 10620 // Merge two bit-precise int types, while trying to preserve typedef info. 10621 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10622 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10623 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10624 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10625 10626 // Like unsigned/int, shouldn't have a type if they don't match. 10627 if (LHSUnsigned != RHSUnsigned) 10628 return {}; 10629 10630 if (LHSBits != RHSBits) 10631 return {}; 10632 return LHS; 10633 } 10634 } 10635 10636 llvm_unreachable("Invalid Type::Class!"); 10637 } 10638 10639 bool ASTContext::mergeExtParameterInfo( 10640 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10641 bool &CanUseFirst, bool &CanUseSecond, 10642 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10643 assert(NewParamInfos.empty() && "param info list not empty"); 10644 CanUseFirst = CanUseSecond = true; 10645 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10646 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10647 10648 // Fast path: if the first type doesn't have ext parameter infos, 10649 // we match if and only if the second type also doesn't have them. 10650 if (!FirstHasInfo && !SecondHasInfo) 10651 return true; 10652 10653 bool NeedParamInfo = false; 10654 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10655 : SecondFnType->getExtParameterInfos().size(); 10656 10657 for (size_t I = 0; I < E; ++I) { 10658 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10659 if (FirstHasInfo) 10660 FirstParam = FirstFnType->getExtParameterInfo(I); 10661 if (SecondHasInfo) 10662 SecondParam = SecondFnType->getExtParameterInfo(I); 10663 10664 // Cannot merge unless everything except the noescape flag matches. 10665 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10666 return false; 10667 10668 bool FirstNoEscape = FirstParam.isNoEscape(); 10669 bool SecondNoEscape = SecondParam.isNoEscape(); 10670 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10671 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10672 if (NewParamInfos.back().getOpaqueValue()) 10673 NeedParamInfo = true; 10674 if (FirstNoEscape != IsNoEscape) 10675 CanUseFirst = false; 10676 if (SecondNoEscape != IsNoEscape) 10677 CanUseSecond = false; 10678 } 10679 10680 if (!NeedParamInfo) 10681 NewParamInfos.clear(); 10682 10683 return true; 10684 } 10685 10686 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10687 ObjCLayouts[CD] = nullptr; 10688 } 10689 10690 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10691 /// 'RHS' attributes and returns the merged version; including for function 10692 /// return types. 
10693 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10694 QualType LHSCan = getCanonicalType(LHS), 10695 RHSCan = getCanonicalType(RHS); 10696 // If two types are identical, they are compatible. 10697 if (LHSCan == RHSCan) 10698 return LHS; 10699 if (RHSCan->isFunctionType()) { 10700 if (!LHSCan->isFunctionType()) 10701 return {}; 10702 QualType OldReturnType = 10703 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10704 QualType NewReturnType = 10705 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10706 QualType ResReturnType = 10707 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10708 if (ResReturnType.isNull()) 10709 return {}; 10710 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10711 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10712 // In either case, use OldReturnType to build the new function type. 10713 const auto *F = LHS->castAs<FunctionType>(); 10714 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10715 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10716 EPI.ExtInfo = getFunctionExtInfo(LHS); 10717 QualType ResultType = 10718 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10719 return ResultType; 10720 } 10721 } 10722 return {}; 10723 } 10724 10725 // If the qualifiers are different, the types can still be merged. 10726 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10727 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10728 if (LQuals != RQuals) { 10729 // If any of these qualifiers are different, we have a type mismatch. 10730 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10731 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10732 return {}; 10733 10734 // Exactly one GC qualifier difference is allowed: __strong is 10735 // okay if the other type has no GC qualifier but is an Objective 10736 // C object pointer (i.e. implicitly strong by default). We fix 10737 // this by pretending that the unqualified type was actually 10738 // qualified __strong. 
10739 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10740 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10741 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10742 10743 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10744 return {}; 10745 10746 if (GC_L == Qualifiers::Strong) 10747 return LHS; 10748 if (GC_R == Qualifiers::Strong) 10749 return RHS; 10750 return {}; 10751 } 10752 10753 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10754 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10755 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10756 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10757 if (ResQT == LHSBaseQT) 10758 return LHS; 10759 if (ResQT == RHSBaseQT) 10760 return RHS; 10761 } 10762 return {}; 10763 } 10764 10765 //===----------------------------------------------------------------------===// 10766 // Integer Predicates 10767 //===----------------------------------------------------------------------===// 10768 10769 unsigned ASTContext::getIntWidth(QualType T) const { 10770 if (const auto *ET = T->getAs<EnumType>()) 10771 T = ET->getDecl()->getIntegerType(); 10772 if (T->isBooleanType()) 10773 return 1; 10774 if (const auto *EIT = T->getAs<BitIntType>()) 10775 return EIT->getNumBits(); 10776 // For builtin types, just use the standard type sizing method. 10777 return (unsigned)getTypeSize(T); 10778 } 10779 10780 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 10781 assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 10782 "Unexpected type"); 10783 10784 // Turn <4 x signed int> -> <4 x unsigned int> 10785 if (const auto *VTy = T->getAs<VectorType>()) 10786 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 10787 VTy->getNumElements(), VTy->getVectorKind()); 10788 10789 // For _BitInt, return an unsigned _BitInt with the same width. 10790 if (const auto *EITy = T->getAs<BitIntType>()) 10791 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 10792 10793 // For enums, get the underlying integer type of the enum, and let the general 10794 // integer type sign-changing code handle it. 10795 if (const auto *ETy = T->getAs<EnumType>()) 10796 T = ETy->getDecl()->getIntegerType(); 10797 10798 switch (T->castAs<BuiltinType>()->getKind()) { 10799 case BuiltinType::Char_S: 10800 case BuiltinType::SChar: 10801 return UnsignedCharTy; 10802 case BuiltinType::Short: 10803 return UnsignedShortTy; 10804 case BuiltinType::Int: 10805 return UnsignedIntTy; 10806 case BuiltinType::Long: 10807 return UnsignedLongTy; 10808 case BuiltinType::LongLong: 10809 return UnsignedLongLongTy; 10810 case BuiltinType::Int128: 10811 return UnsignedInt128Ty; 10812 // wchar_t is special. It is either signed or not, but when it's signed, 10813 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 10814 // version of its underlying type instead.
10815 case BuiltinType::WChar_S: 10816 return getUnsignedWCharType(); 10817 10818 case BuiltinType::ShortAccum: 10819 return UnsignedShortAccumTy; 10820 case BuiltinType::Accum: 10821 return UnsignedAccumTy; 10822 case BuiltinType::LongAccum: 10823 return UnsignedLongAccumTy; 10824 case BuiltinType::SatShortAccum: 10825 return SatUnsignedShortAccumTy; 10826 case BuiltinType::SatAccum: 10827 return SatUnsignedAccumTy; 10828 case BuiltinType::SatLongAccum: 10829 return SatUnsignedLongAccumTy; 10830 case BuiltinType::ShortFract: 10831 return UnsignedShortFractTy; 10832 case BuiltinType::Fract: 10833 return UnsignedFractTy; 10834 case BuiltinType::LongFract: 10835 return UnsignedLongFractTy; 10836 case BuiltinType::SatShortFract: 10837 return SatUnsignedShortFractTy; 10838 case BuiltinType::SatFract: 10839 return SatUnsignedFractTy; 10840 case BuiltinType::SatLongFract: 10841 return SatUnsignedLongFractTy; 10842 default: 10843 llvm_unreachable("Unexpected signed integer or fixed point type"); 10844 } 10845 } 10846 10847 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 10848 assert((T->hasUnsignedIntegerRepresentation() || 10849 T->isUnsignedFixedPointType()) && 10850 "Unexpected type"); 10851 10852 // Turn <4 x unsigned int> -> <4 x signed int> 10853 if (const auto *VTy = T->getAs<VectorType>()) 10854 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 10855 VTy->getNumElements(), VTy->getVectorKind()); 10856 10857 // For _BitInt, return a signed _BitInt with the same width. 10858 if (const auto *EITy = T->getAs<BitIntType>()) 10859 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 10860 10861 // For enums, get the underlying integer type of the enum, and let the general 10862 // integer type sign-changing code handle it. 10863 if (const auto *ETy = T->getAs<EnumType>()) 10864 T = ETy->getDecl()->getIntegerType(); 10865 10866 switch (T->castAs<BuiltinType>()->getKind()) { 10867 case BuiltinType::Char_U: 10868 case BuiltinType::UChar: 10869 return SignedCharTy; 10870 case BuiltinType::UShort: 10871 return ShortTy; 10872 case BuiltinType::UInt: 10873 return IntTy; 10874 case BuiltinType::ULong: 10875 return LongTy; 10876 case BuiltinType::ULongLong: 10877 return LongLongTy; 10878 case BuiltinType::UInt128: 10879 return Int128Ty; 10880 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 10881 // there's no matching "signed wchar_t". Therefore we return the signed 10882 // version of its underlying type instead.
10883 case BuiltinType::WChar_U: 10884 return getSignedWCharType(); 10885 10886 case BuiltinType::UShortAccum: 10887 return ShortAccumTy; 10888 case BuiltinType::UAccum: 10889 return AccumTy; 10890 case BuiltinType::ULongAccum: 10891 return LongAccumTy; 10892 case BuiltinType::SatUShortAccum: 10893 return SatShortAccumTy; 10894 case BuiltinType::SatUAccum: 10895 return SatAccumTy; 10896 case BuiltinType::SatULongAccum: 10897 return SatLongAccumTy; 10898 case BuiltinType::UShortFract: 10899 return ShortFractTy; 10900 case BuiltinType::UFract: 10901 return FractTy; 10902 case BuiltinType::ULongFract: 10903 return LongFractTy; 10904 case BuiltinType::SatUShortFract: 10905 return SatShortFractTy; 10906 case BuiltinType::SatUFract: 10907 return SatFractTy; 10908 case BuiltinType::SatULongFract: 10909 return SatLongFractTy; 10910 default: 10911 llvm_unreachable("Unexpected unsigned integer or fixed point type"); 10912 } 10913 } 10914 10915 ASTMutationListener::~ASTMutationListener() = default; 10916 10917 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 10918 QualType ReturnType) {} 10919 10920 //===----------------------------------------------------------------------===// 10921 // Builtin Type Computation 10922 //===----------------------------------------------------------------------===// 10923 10924 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 10925 /// pointer over the consumed characters. This returns the resultant type. If 10926 /// AllowTypeModifiers is false then modifiers like '*' are not parsed, just basic 10927 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 10928 /// a vector of "i*". 10929 /// 10930 /// RequiresICE is filled in on return to indicate whether the value is required 10931 /// to be an Integer Constant Expression. 10932 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 10933 ASTContext::GetBuiltinTypeError &Error, 10934 bool &RequiresICE, 10935 bool AllowTypeModifiers) { 10936 // Modifiers. 10937 int HowLong = 0; 10938 bool Signed = false, Unsigned = false; 10939 RequiresICE = false; 10940 10941 // Read the prefixed modifiers first. 10942 bool Done = false; 10943 #ifndef NDEBUG 10944 bool IsSpecial = false; 10945 #endif 10946 while (!Done) { 10947 switch (*Str++) { 10948 default: Done = true; --Str; break; 10949 case 'I': 10950 RequiresICE = true; 10951 break; 10952 case 'S': 10953 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 10954 assert(!Signed && "Can't use 'S' modifier multiple times!"); 10955 Signed = true; 10956 break; 10957 case 'U': 10958 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 10959 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 10960 Unsigned = true; 10961 break; 10962 case 'L': 10963 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 10964 assert(HowLong <= 2 && "Can't have LLLL modifier"); 10965 ++HowLong; 10966 break; 10967 case 'N': 10968 // 'N' behaves like 'L' for all non-LP64 targets and 'int' otherwise. 10969 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10970 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 10971 #ifndef NDEBUG 10972 IsSpecial = true; 10973 #endif 10974 if (Context.getTargetInfo().getLongWidth() == 32) 10975 ++HowLong; 10976 break; 10977 case 'W': 10978 // This modifier represents the int64 type.
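// Illustrative note: on a target where int64_t is 'long' the descriptor "Wi"
// decodes to 'long', and where int64_t is 'long long' it decodes to
// 'long long', mirroring the switch below.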
10979 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10980 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 10981 #ifndef NDEBUG 10982 IsSpecial = true; 10983 #endif 10984 switch (Context.getTargetInfo().getInt64Type()) { 10985 default: 10986 llvm_unreachable("Unexpected integer type"); 10987 case TargetInfo::SignedLong: 10988 HowLong = 1; 10989 break; 10990 case TargetInfo::SignedLongLong: 10991 HowLong = 2; 10992 break; 10993 } 10994 break; 10995 case 'Z': 10996 // This modifier represents int32 type. 10997 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10998 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 10999 #ifndef NDEBUG 11000 IsSpecial = true; 11001 #endif 11002 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 11003 default: 11004 llvm_unreachable("Unexpected integer type"); 11005 case TargetInfo::SignedInt: 11006 HowLong = 0; 11007 break; 11008 case TargetInfo::SignedLong: 11009 HowLong = 1; 11010 break; 11011 case TargetInfo::SignedLongLong: 11012 HowLong = 2; 11013 break; 11014 } 11015 break; 11016 case 'O': 11017 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11018 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 11019 #ifndef NDEBUG 11020 IsSpecial = true; 11021 #endif 11022 if (Context.getLangOpts().OpenCL) 11023 HowLong = 1; 11024 else 11025 HowLong = 2; 11026 break; 11027 } 11028 } 11029 11030 QualType Type; 11031 11032 // Read the base type. 11033 switch (*Str++) { 11034 default: llvm_unreachable("Unknown builtin type letter!"); 11035 case 'x': 11036 assert(HowLong == 0 && !Signed && !Unsigned && 11037 "Bad modifiers used with 'x'!"); 11038 Type = Context.Float16Ty; 11039 break; 11040 case 'y': 11041 assert(HowLong == 0 && !Signed && !Unsigned && 11042 "Bad modifiers used with 'y'!"); 11043 Type = Context.BFloat16Ty; 11044 break; 11045 case 'v': 11046 assert(HowLong == 0 && !Signed && !Unsigned && 11047 "Bad modifiers used with 'v'!"); 11048 Type = Context.VoidTy; 11049 break; 11050 case 'h': 11051 assert(HowLong == 0 && !Signed && !Unsigned && 11052 "Bad modifiers used with 'h'!"); 11053 Type = Context.HalfTy; 11054 break; 11055 case 'f': 11056 assert(HowLong == 0 && !Signed && !Unsigned && 11057 "Bad modifiers used with 'f'!"); 11058 Type = Context.FloatTy; 11059 break; 11060 case 'd': 11061 assert(HowLong < 3 && !Signed && !Unsigned && 11062 "Bad modifiers used with 'd'!"); 11063 if (HowLong == 1) 11064 Type = Context.LongDoubleTy; 11065 else if (HowLong == 2) 11066 Type = Context.Float128Ty; 11067 else 11068 Type = Context.DoubleTy; 11069 break; 11070 case 's': 11071 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11072 if (Unsigned) 11073 Type = Context.UnsignedShortTy; 11074 else 11075 Type = Context.ShortTy; 11076 break; 11077 case 'i': 11078 if (HowLong == 3) 11079 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11080 else if (HowLong == 2) 11081 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11082 else if (HowLong == 1) 11083 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11084 else 11085 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11086 break; 11087 case 'c': 11088 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11089 if (Signed) 11090 Type = Context.SignedCharTy; 11091 else if (Unsigned) 11092 Type = Context.UnsignedCharTy; 11093 else 11094 Type = Context.CharTy; 11095 break; 11096 case 'b': // boolean 11097 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11098 Type = Context.BoolTy; 11099 break; 11100 case 'z': // size_t. 11101 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11102 Type = Context.getSizeType(); 11103 break; 11104 case 'w': // wchar_t. 11105 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11106 Type = Context.getWideCharType(); 11107 break; 11108 case 'F': 11109 Type = Context.getCFConstantStringType(); 11110 break; 11111 case 'G': 11112 Type = Context.getObjCIdType(); 11113 break; 11114 case 'H': 11115 Type = Context.getObjCSelType(); 11116 break; 11117 case 'M': 11118 Type = Context.getObjCSuperType(); 11119 break; 11120 case 'a': 11121 Type = Context.getBuiltinVaListType(); 11122 assert(!Type.isNull() && "builtin va list type not initialized!"); 11123 break; 11124 case 'A': 11125 // This is a "reference" to a va_list; however, what exactly 11126 // this means depends on how va_list is defined. There are two 11127 // different kinds of va_list: ones passed by value, and ones 11128 // passed by reference. An example of a by-value va_list is 11129 // x86, where va_list is a char*. An example of by-ref va_list 11130 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11131 // we want this argument to be a char*&; for x86-64, we want 11132 // it to be a __va_list_tag*. 11133 Type = Context.getBuiltinVaListType(); 11134 assert(!Type.isNull() && "builtin va list type not initialized!"); 11135 if (Type->isArrayType()) 11136 Type = Context.getArrayDecayedType(Type); 11137 else 11138 Type = Context.getLValueReferenceType(Type); 11139 break; 11140 case 'q': { 11141 char *End; 11142 unsigned NumElements = strtoul(Str, &End, 10); 11143 assert(End != Str && "Missing vector size"); 11144 Str = End; 11145 11146 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11147 RequiresICE, false); 11148 assert(!RequiresICE && "Can't require vector ICE"); 11149 11150 Type = Context.getScalableVectorType(ElementType, NumElements); 11151 break; 11152 } 11153 case 'V': { 11154 char *End; 11155 unsigned NumElements = strtoul(Str, &End, 10); 11156 assert(End != Str && "Missing vector size"); 11157 Str = End; 11158 11159 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11160 RequiresICE, false); 11161 assert(!RequiresICE && "Can't require vector ICE"); 11162 11163 // TODO: No way to make AltiVec vectors in builtins yet. 
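// (Illustrative example: the descriptor "V4f" decodes to a generic vector of
// four floats, roughly 'float __attribute__((vector_size(16)))'.)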
11164 Type = Context.getVectorType(ElementType, NumElements, 11165 VectorType::GenericVector); 11166 break; 11167 } 11168 case 'E': { 11169 char *End; 11170 11171 unsigned NumElements = strtoul(Str, &End, 10); 11172 assert(End != Str && "Missing vector size"); 11173 11174 Str = End; 11175 11176 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11177 false); 11178 Type = Context.getExtVectorType(ElementType, NumElements); 11179 break; 11180 } 11181 case 'X': { 11182 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11183 false); 11184 assert(!RequiresICE && "Can't require complex ICE"); 11185 Type = Context.getComplexType(ElementType); 11186 break; 11187 } 11188 case 'Y': 11189 Type = Context.getPointerDiffType(); 11190 break; 11191 case 'P': 11192 Type = Context.getFILEType(); 11193 if (Type.isNull()) { 11194 Error = ASTContext::GE_Missing_stdio; 11195 return {}; 11196 } 11197 break; 11198 case 'J': 11199 if (Signed) 11200 Type = Context.getsigjmp_bufType(); 11201 else 11202 Type = Context.getjmp_bufType(); 11203 11204 if (Type.isNull()) { 11205 Error = ASTContext::GE_Missing_setjmp; 11206 return {}; 11207 } 11208 break; 11209 case 'K': 11210 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11211 Type = Context.getucontext_tType(); 11212 11213 if (Type.isNull()) { 11214 Error = ASTContext::GE_Missing_ucontext; 11215 return {}; 11216 } 11217 break; 11218 case 'p': 11219 Type = Context.getProcessIDType(); 11220 break; 11221 } 11222 11223 // If there are modifiers and if we're allowed to parse them, go for it. 11224 Done = !AllowTypeModifiers; 11225 while (!Done) { 11226 switch (char c = *Str++) { 11227 default: Done = true; --Str; break; 11228 case '*': 11229 case '&': { 11230 // Both pointers and references can have their pointee types 11231 // qualified with an address space. 11232 char *End; 11233 unsigned AddrSpace = strtoul(Str, &End, 10); 11234 if (End != Str) { 11235 // Note AddrSpace == 0 is not the same as an unspecified address space. 11236 Type = Context.getAddrSpaceQualType( 11237 Type, 11238 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11239 Str = End; 11240 } 11241 if (c == '*') 11242 Type = Context.getPointerType(Type); 11243 else 11244 Type = Context.getLValueReferenceType(Type); 11245 break; 11246 } 11247 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11248 case 'C': 11249 Type = Type.withConst(); 11250 break; 11251 case 'D': 11252 Type = Context.getVolatileType(Type); 11253 break; 11254 case 'R': 11255 Type = Type.withRestrict(); 11256 break; 11257 } 11258 } 11259 11260 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11261 "Integer constant 'I' type must be an integer"); 11262 11263 return Type; 11264 } 11265 11266 // On some targets such as PowerPC, some of the builtins are defined with custom 11267 // type descriptors for target-dependent types. These descriptors are decoded in 11268 // other functions, but it may be useful to be able to fall back to default 11269 // descriptor decoding to define builtins mixing target-dependent and target- 11270 // independent types. This function allows decoding one type descriptor with 11271 // default decoding. 
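// A hypothetical target-specific decoder might use this fallback as sketched
// below (illustrative only; 'DecodeTargetTypeFromStr' is not a real function):
//
//   static QualType DecodeTargetTypeFromStr(const char *&Str,
//                                           const ASTContext &Ctx,
//                                           ASTContext::GetBuiltinTypeError &Error,
//                                           bool &RequiresICE,
//                                           bool AllowTypeModifiers) {
//     switch (*Str) {
//     // ... handle the target's custom descriptor letters here ...
//     default:
//       // Fall back to the default decoding for the standard letters.
//       return Ctx.DecodeTypeStr(Str, Ctx, Error, RequiresICE,
//                                AllowTypeModifiers);
//     }
//   }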
11272 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11273 GetBuiltinTypeError &Error, bool &RequireICE, 11274 bool AllowTypeModifiers) const { 11275 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11276 } 11277 11278 /// GetBuiltinType - Return the type for the specified builtin. 11279 QualType ASTContext::GetBuiltinType(unsigned Id, 11280 GetBuiltinTypeError &Error, 11281 unsigned *IntegerConstantArgs) const { 11282 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11283 if (TypeStr[0] == '\0') { 11284 Error = GE_Missing_type; 11285 return {}; 11286 } 11287 11288 SmallVector<QualType, 8> ArgTypes; 11289 11290 bool RequiresICE = false; 11291 Error = GE_None; 11292 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11293 RequiresICE, true); 11294 if (Error != GE_None) 11295 return {}; 11296 11297 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11298 11299 while (TypeStr[0] && TypeStr[0] != '.') { 11300 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11301 if (Error != GE_None) 11302 return {}; 11303 11304 // If this argument is required to be an IntegerConstantExpression and the 11305 // caller cares, fill in the bitmask we return. 11306 if (RequiresICE && IntegerConstantArgs) 11307 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11308 11309 // Do array -> pointer decay. The builtin should use the decayed type. 11310 if (Ty->isArrayType()) 11311 Ty = getArrayDecayedType(Ty); 11312 11313 ArgTypes.push_back(Ty); 11314 } 11315 11316 if (Id == Builtin::BI__GetExceptionInfo) 11317 return {}; 11318 11319 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11320 "'.' should only occur at end of builtin type list!"); 11321 11322 bool Variadic = (TypeStr[0] == '.'); 11323 11324 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11325 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11326 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11327 11328 11329 // We really shouldn't be making a no-proto type here. 11330 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11331 return getFunctionNoProtoType(ResType, EI); 11332 11333 FunctionProtoType::ExtProtoInfo EPI; 11334 EPI.ExtInfo = EI; 11335 EPI.Variadic = Variadic; 11336 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11337 EPI.ExceptionSpec.Type = 11338 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11339 11340 return getFunctionType(ResType, ArgTypes, EPI); 11341 } 11342 11343 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11344 const FunctionDecl *FD) { 11345 if (!FD->isExternallyVisible()) 11346 return GVA_Internal; 11347 11348 // Non-user-provided functions get emitted as weak definitions with every 11349 // use, no matter whether they've been explicitly instantiated etc. 
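// (For example, an implicitly-defined or defaulted-in-class special member
// function is treated as GVA_DiscardableODR here whenever it is externally
// visible.)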
11350 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) 11351 if (!MD->isUserProvided()) 11352 return GVA_DiscardableODR; 11353 11354 GVALinkage External; 11355 switch (FD->getTemplateSpecializationKind()) { 11356 case TSK_Undeclared: 11357 case TSK_ExplicitSpecialization: 11358 External = GVA_StrongExternal; 11359 break; 11360 11361 case TSK_ExplicitInstantiationDefinition: 11362 return GVA_StrongODR; 11363 11364 // C++11 [temp.explicit]p10: 11365 // [ Note: The intent is that an inline function that is the subject of 11366 // an explicit instantiation declaration will still be implicitly 11367 // instantiated when used so that the body can be considered for 11368 // inlining, but that no out-of-line copy of the inline function would be 11369 // generated in the translation unit. -- end note ] 11370 case TSK_ExplicitInstantiationDeclaration: 11371 return GVA_AvailableExternally; 11372 11373 case TSK_ImplicitInstantiation: 11374 External = GVA_DiscardableODR; 11375 break; 11376 } 11377 11378 if (!FD->isInlined()) 11379 return External; 11380 11381 if ((!Context.getLangOpts().CPlusPlus && 11382 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11383 !FD->hasAttr<DLLExportAttr>()) || 11384 FD->hasAttr<GNUInlineAttr>()) { 11385 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11386 11387 // GNU or C99 inline semantics. Determine whether this symbol should be 11388 // externally visible. 11389 if (FD->isInlineDefinitionExternallyVisible()) 11390 return External; 11391 11392 // C99 inline semantics, where the symbol is not externally visible. 11393 return GVA_AvailableExternally; 11394 } 11395 11396 // Functions specified with extern and inline in -fms-compatibility mode 11397 // forcibly get emitted. While the body of the function cannot be later 11398 // replaced, the function definition cannot be discarded. 11399 if (FD->isMSExternInline()) 11400 return GVA_StrongODR; 11401 11402 return GVA_DiscardableODR; 11403 } 11404 11405 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11406 const Decl *D, GVALinkage L) { 11407 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11408 // dllexport/dllimport on inline functions. 11409 if (D->hasAttr<DLLImportAttr>()) { 11410 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11411 return GVA_AvailableExternally; 11412 } else if (D->hasAttr<DLLExportAttr>()) { 11413 if (L == GVA_DiscardableODR) 11414 return GVA_StrongODR; 11415 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11416 // Device-side functions with __global__ attribute must always be 11417 // visible externally so they can be launched from host. 11418 if (D->hasAttr<CUDAGlobalAttr>() && 11419 (L == GVA_DiscardableODR || L == GVA_Internal)) 11420 return GVA_StrongODR; 11421 // Single source offloading languages like CUDA/HIP need to be able to 11422 // access static device variables from host code of the same compilation 11423 // unit. This is done by externalizing the static variable with a shared 11424 // name between the host and device compilation which is the same for the 11425 // same compilation unit whereas different among different compilation 11426 // units. 11427 if (Context.shouldExternalize(D)) 11428 return GVA_StrongExternal; 11429 } 11430 return L; 11431 } 11432 11433 /// Adjust the GVALinkage for a declaration based on what an external AST source 11434 /// knows about whether there can be other definitions of this declaration. 
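/// (EK_Never means other translation units rely on this one for the
/// definition; EK_Always means a definition is known to exist elsewhere, so
/// this TU may treat it as available externally; EK_ReplyHazy leaves the
/// linkage unchanged.)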
11435 static GVALinkage 11436 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11437 GVALinkage L) { 11438 ExternalASTSource *Source = Ctx.getExternalSource(); 11439 if (!Source) 11440 return L; 11441 11442 switch (Source->hasExternalDefinitions(D)) { 11443 case ExternalASTSource::EK_Never: 11444 // Other translation units rely on us to provide the definition. 11445 if (L == GVA_DiscardableODR) 11446 return GVA_StrongODR; 11447 break; 11448 11449 case ExternalASTSource::EK_Always: 11450 return GVA_AvailableExternally; 11451 11452 case ExternalASTSource::EK_ReplyHazy: 11453 break; 11454 } 11455 return L; 11456 } 11457 11458 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11459 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11460 adjustGVALinkageForAttributes(*this, FD, 11461 basicGVALinkageForFunction(*this, FD))); 11462 } 11463 11464 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11465 const VarDecl *VD) { 11466 if (!VD->isExternallyVisible()) 11467 return GVA_Internal; 11468 11469 if (VD->isStaticLocal()) { 11470 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11471 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11472 LexicalContext = LexicalContext->getLexicalParent(); 11473 11474 // ObjC Blocks can create local variables that don't have a FunctionDecl 11475 // LexicalContext. 11476 if (!LexicalContext) 11477 return GVA_DiscardableODR; 11478 11479 // Otherwise, let the static local variable inherit its linkage from the 11480 // nearest enclosing function. 11481 auto StaticLocalLinkage = 11482 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11483 11484 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11485 // be emitted in any object with references to the symbol for the object it 11486 // contains, whether inline or out-of-line." 11487 // Similar behavior is observed with MSVC. An alternative ABI could use 11488 // StrongODR/AvailableExternally to match the function, but none are 11489 // known/supported currently. 11490 if (StaticLocalLinkage == GVA_StrongODR || 11491 StaticLocalLinkage == GVA_AvailableExternally) 11492 return GVA_DiscardableODR; 11493 return StaticLocalLinkage; 11494 } 11495 11496 // MSVC treats in-class initialized static data members as definitions. 11497 // By giving them non-strong linkage, out-of-line definitions won't 11498 // cause link errors. 11499 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11500 return GVA_DiscardableODR; 11501 11502 // Most non-template variables have strong linkage; inline variables are 11503 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11504 GVALinkage StrongLinkage; 11505 switch (Context.getInlineVariableDefinitionKind(VD)) { 11506 case ASTContext::InlineVariableDefinitionKind::None: 11507 StrongLinkage = GVA_StrongExternal; 11508 break; 11509 case ASTContext::InlineVariableDefinitionKind::Weak: 11510 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11511 StrongLinkage = GVA_DiscardableODR; 11512 break; 11513 case ASTContext::InlineVariableDefinitionKind::Strong: 11514 StrongLinkage = GVA_StrongODR; 11515 break; 11516 } 11517 11518 switch (VD->getTemplateSpecializationKind()) { 11519 case TSK_Undeclared: 11520 return StrongLinkage; 11521 11522 case TSK_ExplicitSpecialization: 11523 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11524 VD->isStaticDataMember() 11525 ? 
GVA_StrongODR 11526 : StrongLinkage; 11527 11528 case TSK_ExplicitInstantiationDefinition: 11529 return GVA_StrongODR; 11530 11531 case TSK_ExplicitInstantiationDeclaration: 11532 return GVA_AvailableExternally; 11533 11534 case TSK_ImplicitInstantiation: 11535 return GVA_DiscardableODR; 11536 } 11537 11538 llvm_unreachable("Invalid Linkage!"); 11539 } 11540 11541 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { 11542 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11543 adjustGVALinkageForAttributes(*this, VD, 11544 basicGVALinkageForVariable(*this, VD))); 11545 } 11546 11547 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11548 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11549 if (!VD->isFileVarDecl()) 11550 return false; 11551 // Global named register variables (GNU extension) are never emitted. 11552 if (VD->getStorageClass() == SC_Register) 11553 return false; 11554 if (VD->getDescribedVarTemplate() || 11555 isa<VarTemplatePartialSpecializationDecl>(VD)) 11556 return false; 11557 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11558 // We never need to emit an uninstantiated function template. 11559 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11560 return false; 11561 } else if (isa<PragmaCommentDecl>(D)) 11562 return true; 11563 else if (isa<PragmaDetectMismatchDecl>(D)) 11564 return true; 11565 else if (isa<OMPRequiresDecl>(D)) 11566 return true; 11567 else if (isa<OMPThreadPrivateDecl>(D)) 11568 return !D->getDeclContext()->isDependentContext(); 11569 else if (isa<OMPAllocateDecl>(D)) 11570 return !D->getDeclContext()->isDependentContext(); 11571 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11572 return !D->getDeclContext()->isDependentContext(); 11573 else if (isa<ImportDecl>(D)) 11574 return true; 11575 else 11576 return false; 11577 11578 // If this is a member of a class template, we do not need to emit it. 11579 if (D->getDeclContext()->isDependentContext()) 11580 return false; 11581 11582 // Weak references don't produce any output by themselves. 11583 if (D->hasAttr<WeakRefAttr>()) 11584 return false; 11585 11586 // Aliases and used decls are required. 11587 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11588 return true; 11589 11590 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11591 // Forward declarations aren't required. 11592 if (!FD->doesThisDeclarationHaveABody()) 11593 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11594 11595 // Constructors and destructors are required. 11596 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11597 return true; 11598 11599 // The key function for a class is required. This rule only comes 11600 // into play when inline functions can be key functions, though. 11601 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11602 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11603 const CXXRecordDecl *RD = MD->getParent(); 11604 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11605 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11606 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11607 return true; 11608 } 11609 } 11610 } 11611 11612 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11613 11614 // static, static inline, always_inline, and extern inline functions can 11615 // always be deferred. Normal inline functions can be deferred in C99/C++. 11616 // Implicit template instantiations can also be deferred in C++. 
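// In other words, only functions whose computed linkage is non-discardable
// (GVA_StrongExternal or GVA_StrongODR) are reported as must-be-emitted here.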
11617 return !isDiscardableGVALinkage(Linkage); 11618 } 11619 11620 const auto *VD = cast<VarDecl>(D); 11621 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11622 11623 // If the decl is marked as `declare target to`, it should be emitted for the 11624 // host and for the device. 11625 if (LangOpts.OpenMP && 11626 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11627 return true; 11628 11629 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11630 !isMSStaticDataMemberInlineDefinition(VD)) 11631 return false; 11632 11633 // Variables that can be needed in other TUs are required. 11634 auto Linkage = GetGVALinkageForVariable(VD); 11635 if (!isDiscardableGVALinkage(Linkage)) 11636 return true; 11637 11638 // We never need to emit a variable that is available in another TU. 11639 if (Linkage == GVA_AvailableExternally) 11640 return false; 11641 11642 // Variables that have destruction with side-effects are required. 11643 if (VD->needsDestruction(*this)) 11644 return true; 11645 11646 // Variables that have initialization with side-effects are required. 11647 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11648 // We can get a value-dependent initializer during error recovery. 11649 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11650 return true; 11651 11652 // Likewise, variables with tuple-like bindings are required if their 11653 // bindings have side-effects. 11654 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11655 for (const auto *BD : DD->bindings()) 11656 if (const auto *BindingVD = BD->getHoldingVar()) 11657 if (DeclMustBeEmitted(BindingVD)) 11658 return true; 11659 11660 return false; 11661 } 11662 11663 void ASTContext::forEachMultiversionedFunctionVersion( 11664 const FunctionDecl *FD, 11665 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11666 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11667 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11668 FD = FD->getMostRecentDecl(); 11669 // FIXME: The order of traversal here matters and depends on the order of 11670 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11671 // shouldn't rely on that. 11672 for (auto *CurDecl : 11673 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11674 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11675 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11676 std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) { 11677 SeenDecls.insert(CurFD); 11678 Pred(CurFD); 11679 } 11680 } 11681 } 11682 11683 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11684 bool IsCXXMethod, 11685 bool IsBuiltin) const { 11686 // Pass through to the C++ ABI object 11687 if (IsCXXMethod) 11688 return ABI->getDefaultMethodCallConv(IsVariadic); 11689 11690 // Builtins ignore user-specified default calling convention and remain the 11691 // Target's default calling convention. 11692 if (!IsBuiltin) { 11693 switch (LangOpts.getDefaultCallingConv()) { 11694 case LangOptions::DCC_None: 11695 break; 11696 case LangOptions::DCC_CDecl: 11697 return CC_C; 11698 case LangOptions::DCC_FastCall: 11699 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11700 return CC_X86FastCall; 11701 break; 11702 case LangOptions::DCC_StdCall: 11703 if (!IsVariadic) 11704 return CC_X86StdCall; 11705 break; 11706 case LangOptions::DCC_VectorCall: 11707 // __vectorcall cannot be applied to variadic functions. 
11708 if (!IsVariadic) 11709 return CC_X86VectorCall; 11710 break; 11711 case LangOptions::DCC_RegCall: 11712 // __regcall cannot be applied to variadic functions. 11713 if (!IsVariadic) 11714 return CC_X86RegCall; 11715 break; 11716 } 11717 } 11718 return Target->getDefaultCallingConv(); 11719 } 11720 11721 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 11722 // Pass through to the C++ ABI object 11723 return ABI->isNearlyEmpty(RD); 11724 } 11725 11726 VTableContextBase *ASTContext::getVTableContext() { 11727 if (!VTContext.get()) { 11728 auto ABI = Target->getCXXABI(); 11729 if (ABI.isMicrosoft()) 11730 VTContext.reset(new MicrosoftVTableContext(*this)); 11731 else { 11732 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 11733 ? ItaniumVTableContext::Relative 11734 : ItaniumVTableContext::Pointer; 11735 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 11736 } 11737 } 11738 return VTContext.get(); 11739 } 11740 11741 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 11742 if (!T) 11743 T = Target; 11744 switch (T->getCXXABI().getKind()) { 11745 case TargetCXXABI::AppleARM64: 11746 case TargetCXXABI::Fuchsia: 11747 case TargetCXXABI::GenericAArch64: 11748 case TargetCXXABI::GenericItanium: 11749 case TargetCXXABI::GenericARM: 11750 case TargetCXXABI::GenericMIPS: 11751 case TargetCXXABI::iOS: 11752 case TargetCXXABI::WebAssembly: 11753 case TargetCXXABI::WatchOS: 11754 case TargetCXXABI::XL: 11755 return ItaniumMangleContext::create(*this, getDiagnostics()); 11756 case TargetCXXABI::Microsoft: 11757 return MicrosoftMangleContext::create(*this, getDiagnostics()); 11758 } 11759 llvm_unreachable("Unsupported ABI"); 11760 } 11761 11762 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 11763 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 11764 "Device mangle context does not support Microsoft mangling."); 11765 switch (T.getCXXABI().getKind()) { 11766 case TargetCXXABI::AppleARM64: 11767 case TargetCXXABI::Fuchsia: 11768 case TargetCXXABI::GenericAArch64: 11769 case TargetCXXABI::GenericItanium: 11770 case TargetCXXABI::GenericARM: 11771 case TargetCXXABI::GenericMIPS: 11772 case TargetCXXABI::iOS: 11773 case TargetCXXABI::WebAssembly: 11774 case TargetCXXABI::WatchOS: 11775 case TargetCXXABI::XL: 11776 return ItaniumMangleContext::create( 11777 *this, getDiagnostics(), 11778 [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> { 11779 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 11780 return RD->getDeviceLambdaManglingNumber(); 11781 return llvm::None; 11782 }, 11783 /*IsAux=*/true); 11784 case TargetCXXABI::Microsoft: 11785 return MicrosoftMangleContext::create(*this, getDiagnostics(), 11786 /*IsAux=*/true); 11787 } 11788 llvm_unreachable("Unsupported ABI"); 11789 } 11790 11791 CXXABI::~CXXABI() = default; 11792 11793 size_t ASTContext::getSideTableAllocatedMemory() const { 11794 return ASTRecordLayouts.getMemorySize() + 11795 llvm::capacity_in_bytes(ObjCLayouts) + 11796 llvm::capacity_in_bytes(KeyFunctions) + 11797 llvm::capacity_in_bytes(ObjCImpls) + 11798 llvm::capacity_in_bytes(BlockVarCopyInits) + 11799 llvm::capacity_in_bytes(DeclAttrs) + 11800 llvm::capacity_in_bytes(TemplateOrInstantiation) + 11801 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 11802 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 11803 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 11804 llvm::capacity_in_bytes(OverriddenMethods) + 11805 
llvm::capacity_in_bytes(Types) + 11806 llvm::capacity_in_bytes(VariableArrayTypes); 11807 } 11808 11809 /// getIntTypeForBitwidth - 11810 /// Returns the integer QualType matching the specified details: 11811 /// bitwidth, signed/unsigned. 11812 /// Returns an empty type if there is no appropriate target type. 11813 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, 11814 unsigned Signed) const { 11815 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); 11816 CanQualType QualTy = getFromTargetType(Ty); 11817 if (!QualTy && DestWidth == 128) 11818 return Signed ? Int128Ty : UnsignedInt128Ty; 11819 return QualTy; 11820 } 11821 11822 /// getRealTypeForBitwidth - 11823 /// Returns the floating-point QualType matching the specified bitwidth. 11824 /// Returns an empty type if there is no appropriate target type. 11825 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, 11826 FloatModeKind ExplicitType) const { 11827 FloatModeKind Ty = 11828 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType); 11829 switch (Ty) { 11830 case FloatModeKind::Half: 11831 return HalfTy; 11832 case FloatModeKind::Float: 11833 return FloatTy; 11834 case FloatModeKind::Double: 11835 return DoubleTy; 11836 case FloatModeKind::LongDouble: 11837 return LongDoubleTy; 11838 case FloatModeKind::Float128: 11839 return Float128Ty; 11840 case FloatModeKind::Ibm128: 11841 return Ibm128Ty; 11842 case FloatModeKind::NoFloat: 11843 return {}; 11844 } 11845 11846 llvm_unreachable("Unhandled TargetInfo::RealType value"); 11847 } 11848 11849 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { 11850 if (Number > 1) 11851 MangleNumbers[ND] = Number; 11852 } 11853 11854 unsigned ASTContext::getManglingNumber(const NamedDecl *ND, 11855 bool ForAuxTarget) const { 11856 auto I = MangleNumbers.find(ND); 11857 unsigned Res = I != MangleNumbers.end() ? I->second : 1; 11858 // CUDA/HIP host compilation encodes host and device mangling numbers 11859 // as the lower and upper halves of a 32-bit integer. 11860 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) { 11861 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF; 11862 } else { 11863 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling " 11864 "number for aux target"); 11865 } 11866 return Res > 1 ? Res : 1; 11867 } 11868 11869 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { 11870 if (Number > 1) 11871 StaticLocalNumbers[VD] = Number; 11872 } 11873 11874 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { 11875 auto I = StaticLocalNumbers.find(VD); 11876 return I != StaticLocalNumbers.end() ? I->second : 1; 11877 } 11878 11879 MangleNumberingContext & 11880 ASTContext::getManglingNumberContext(const DeclContext *DC) { 11881 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 11882 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC]; 11883 if (!MCtx) 11884 MCtx = createMangleNumberingContext(); 11885 return *MCtx; 11886 } 11887 11888 MangleNumberingContext & 11889 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) { 11890 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
11891 std::unique_ptr<MangleNumberingContext> &MCtx = 11892 ExtraMangleNumberingContexts[D]; 11893 if (!MCtx) 11894 MCtx = createMangleNumberingContext(); 11895 return *MCtx; 11896 } 11897 11898 std::unique_ptr<MangleNumberingContext> 11899 ASTContext::createMangleNumberingContext() const { 11900 return ABI->createMangleNumberingContext(); 11901 } 11902 11903 const CXXConstructorDecl * 11904 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 11905 return ABI->getCopyConstructorForExceptionObject( 11906 cast<CXXRecordDecl>(RD->getFirstDecl())); 11907 } 11908 11909 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 11910 CXXConstructorDecl *CD) { 11911 return ABI->addCopyConstructorForExceptionObject( 11912 cast<CXXRecordDecl>(RD->getFirstDecl()), 11913 cast<CXXConstructorDecl>(CD->getFirstDecl())); 11914 } 11915 11916 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 11917 TypedefNameDecl *DD) { 11918 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 11919 } 11920 11921 TypedefNameDecl * 11922 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 11923 return ABI->getTypedefNameForUnnamedTagDecl(TD); 11924 } 11925 11926 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 11927 DeclaratorDecl *DD) { 11928 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 11929 } 11930 11931 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 11932 return ABI->getDeclaratorForUnnamedTagDecl(TD); 11933 } 11934 11935 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 11936 ParamIndices[D] = index; 11937 } 11938 11939 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 11940 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 11941 assert(I != ParamIndices.end() && 11942 "ParmIndices lacks entry set by ParmVarDecl"); 11943 return I->second; 11944 } 11945 11946 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 11947 unsigned Length) const { 11948 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 11949 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 11950 EltTy = EltTy.withConst(); 11951 11952 EltTy = adjustStringLiteralBaseType(EltTy); 11953 11954 // Get an array type for the string, according to C99 6.4.5. This includes 11955 // the null terminator character. 
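// (For example, the literal "abc" gets the type 'const char[4]' in C++, and
// 'char[4]' in C unless const string literals are enabled via
// LangOpts.ConstStrings.)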
11956 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, 11957 ArrayType::Normal, /*IndexTypeQuals*/ 0); 11958 } 11959 11960 StringLiteral * 11961 ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { 11962 StringLiteral *&Result = StringLiteralCache[Key]; 11963 if (!Result) 11964 Result = StringLiteral::Create( 11965 *this, Key, StringLiteral::Ordinary, 11966 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), 11967 SourceLocation()); 11968 return Result; 11969 } 11970 11971 MSGuidDecl * 11972 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { 11973 assert(MSGuidTagDecl && "building MS GUID without MS extensions?"); 11974 11975 llvm::FoldingSetNodeID ID; 11976 MSGuidDecl::Profile(ID, Parts); 11977 11978 void *InsertPos; 11979 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos)) 11980 return Existing; 11981 11982 QualType GUIDType = getMSGuidType().withConst(); 11983 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts); 11984 MSGuidDecls.InsertNode(New, InsertPos); 11985 return New; 11986 } 11987 11988 UnnamedGlobalConstantDecl * 11989 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty, 11990 const APValue &APVal) const { 11991 llvm::FoldingSetNodeID ID; 11992 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal); 11993 11994 void *InsertPos; 11995 if (UnnamedGlobalConstantDecl *Existing = 11996 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos)) 11997 return Existing; 11998 11999 UnnamedGlobalConstantDecl *New = 12000 UnnamedGlobalConstantDecl::Create(*this, Ty, APVal); 12001 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos); 12002 return New; 12003 } 12004 12005 TemplateParamObjectDecl * 12006 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { 12007 assert(T->isRecordType() && "template param object of unexpected type"); 12008 12009 // C++ [temp.param]p8: 12010 // [...] a static storage duration object of type 'const T' [...] 12011 T.addConst(); 12012 12013 llvm::FoldingSetNodeID ID; 12014 TemplateParamObjectDecl::Profile(ID, T, V); 12015 12016 void *InsertPos; 12017 if (TemplateParamObjectDecl *Existing = 12018 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos)) 12019 return Existing; 12020 12021 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V); 12022 TemplateParamObjectDecls.InsertNode(New, InsertPos); 12023 return New; 12024 } 12025 12026 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { 12027 const llvm::Triple &T = getTargetInfo().getTriple(); 12028 if (!T.isOSDarwin()) 12029 return false; 12030 12031 if (!(T.isiOS() && T.isOSVersionLT(7)) && 12032 !(T.isMacOSX() && T.isOSVersionLT(10, 9))) 12033 return false; 12034 12035 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 12036 CharUnits sizeChars = getTypeSizeInChars(AtomicTy); 12037 uint64_t Size = sizeChars.getQuantity(); 12038 CharUnits alignChars = getTypeAlignInChars(AtomicTy); 12039 unsigned Align = alignChars.getQuantity(); 12040 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); 12041 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); 12042 } 12043 12044 bool 12045 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, 12046 const ObjCMethodDecl *MethodImpl) { 12047 // No point trying to match an unavailable/deprecated method.
12048 if (MethodDecl->hasAttr<UnavailableAttr>() 12049 || MethodDecl->hasAttr<DeprecatedAttr>()) 12050 return false; 12051 if (MethodDecl->getObjCDeclQualifier() != 12052 MethodImpl->getObjCDeclQualifier()) 12053 return false; 12054 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12055 return false; 12056 12057 if (MethodDecl->param_size() != MethodImpl->param_size()) 12058 return false; 12059 12060 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12061 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12062 EF = MethodDecl->param_end(); 12063 IM != EM && IF != EF; ++IM, ++IF) { 12064 const ParmVarDecl *DeclVar = (*IF); 12065 const ParmVarDecl *ImplVar = (*IM); 12066 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12067 return false; 12068 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12069 return false; 12070 } 12071 12072 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12073 } 12074 12075 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12076 LangAS AS; 12077 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12078 AS = LangAS::Default; 12079 else 12080 AS = QT->getPointeeType().getAddressSpace(); 12081 12082 return getTargetInfo().getNullPointerValue(AS); 12083 } 12084 12085 unsigned ASTContext::getTargetAddressSpace(QualType T) const { 12086 // Return the address space for the type. If the type is a 12087 // function type without an address space qualifier, the 12088 // program address space is used. Otherwise, the target picks 12089 // the best address space based on the type information 12090 return T->isFunctionType() && !T.hasAddressSpace() 12091 ? getTargetInfo().getProgramAddressSpace() 12092 : getTargetAddressSpace(T.getQualifiers()); 12093 } 12094 12095 unsigned ASTContext::getTargetAddressSpace(Qualifiers Q) const { 12096 return getTargetAddressSpace(Q.getAddressSpace()); 12097 } 12098 12099 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12100 if (isTargetAddressSpace(AS)) 12101 return toTargetAddressSpace(AS); 12102 else 12103 return (*AddrSpaceMap)[(unsigned)AS]; 12104 } 12105 12106 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 12107 assert(Ty->isFixedPointType()); 12108 12109 if (Ty->isSaturatedFixedPointType()) return Ty; 12110 12111 switch (Ty->castAs<BuiltinType>()->getKind()) { 12112 default: 12113 llvm_unreachable("Not a fixed point type!"); 12114 case BuiltinType::ShortAccum: 12115 return SatShortAccumTy; 12116 case BuiltinType::Accum: 12117 return SatAccumTy; 12118 case BuiltinType::LongAccum: 12119 return SatLongAccumTy; 12120 case BuiltinType::UShortAccum: 12121 return SatUnsignedShortAccumTy; 12122 case BuiltinType::UAccum: 12123 return SatUnsignedAccumTy; 12124 case BuiltinType::ULongAccum: 12125 return SatUnsignedLongAccumTy; 12126 case BuiltinType::ShortFract: 12127 return SatShortFractTy; 12128 case BuiltinType::Fract: 12129 return SatFractTy; 12130 case BuiltinType::LongFract: 12131 return SatLongFractTy; 12132 case BuiltinType::UShortFract: 12133 return SatUnsignedShortFractTy; 12134 case BuiltinType::UFract: 12135 return SatUnsignedFractTy; 12136 case BuiltinType::ULongFract: 12137 return SatUnsignedLongFractTy; 12138 } 12139 } 12140 12141 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 12142 if (LangOpts.OpenCL) 12143 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 12144 12145 if (LangOpts.CUDA) 12146 return 
getTargetInfo().getCUDABuiltinAddressSpace(AS); 12147 12148 return getLangASFromTargetAS(AS); 12149 } 12150 12151 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 12152 // doesn't include ASTContext.h 12153 template 12154 clang::LazyGenerationalUpdatePtr< 12155 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 12156 clang::LazyGenerationalUpdatePtr< 12157 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 12158 const clang::ASTContext &Ctx, Decl *Value); 12159 12160 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 12161 assert(Ty->isFixedPointType()); 12162 12163 const TargetInfo &Target = getTargetInfo(); 12164 switch (Ty->castAs<BuiltinType>()->getKind()) { 12165 default: 12166 llvm_unreachable("Not a fixed point type!"); 12167 case BuiltinType::ShortAccum: 12168 case BuiltinType::SatShortAccum: 12169 return Target.getShortAccumScale(); 12170 case BuiltinType::Accum: 12171 case BuiltinType::SatAccum: 12172 return Target.getAccumScale(); 12173 case BuiltinType::LongAccum: 12174 case BuiltinType::SatLongAccum: 12175 return Target.getLongAccumScale(); 12176 case BuiltinType::UShortAccum: 12177 case BuiltinType::SatUShortAccum: 12178 return Target.getUnsignedShortAccumScale(); 12179 case BuiltinType::UAccum: 12180 case BuiltinType::SatUAccum: 12181 return Target.getUnsignedAccumScale(); 12182 case BuiltinType::ULongAccum: 12183 case BuiltinType::SatULongAccum: 12184 return Target.getUnsignedLongAccumScale(); 12185 case BuiltinType::ShortFract: 12186 case BuiltinType::SatShortFract: 12187 return Target.getShortFractScale(); 12188 case BuiltinType::Fract: 12189 case BuiltinType::SatFract: 12190 return Target.getFractScale(); 12191 case BuiltinType::LongFract: 12192 case BuiltinType::SatLongFract: 12193 return Target.getLongFractScale(); 12194 case BuiltinType::UShortFract: 12195 case BuiltinType::SatUShortFract: 12196 return Target.getUnsignedShortFractScale(); 12197 case BuiltinType::UFract: 12198 case BuiltinType::SatUFract: 12199 return Target.getUnsignedFractScale(); 12200 case BuiltinType::ULongFract: 12201 case BuiltinType::SatULongFract: 12202 return Target.getUnsignedLongFractScale(); 12203 } 12204 } 12205 12206 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 12207 assert(Ty->isFixedPointType()); 12208 12209 const TargetInfo &Target = getTargetInfo(); 12210 switch (Ty->castAs<BuiltinType>()->getKind()) { 12211 default: 12212 llvm_unreachable("Not a fixed point type!"); 12213 case BuiltinType::ShortAccum: 12214 case BuiltinType::SatShortAccum: 12215 return Target.getShortAccumIBits(); 12216 case BuiltinType::Accum: 12217 case BuiltinType::SatAccum: 12218 return Target.getAccumIBits(); 12219 case BuiltinType::LongAccum: 12220 case BuiltinType::SatLongAccum: 12221 return Target.getLongAccumIBits(); 12222 case BuiltinType::UShortAccum: 12223 case BuiltinType::SatUShortAccum: 12224 return Target.getUnsignedShortAccumIBits(); 12225 case BuiltinType::UAccum: 12226 case BuiltinType::SatUAccum: 12227 return Target.getUnsignedAccumIBits(); 12228 case BuiltinType::ULongAccum: 12229 case BuiltinType::SatULongAccum: 12230 return Target.getUnsignedLongAccumIBits(); 12231 case BuiltinType::ShortFract: 12232 case BuiltinType::SatShortFract: 12233 case BuiltinType::Fract: 12234 case BuiltinType::SatFract: 12235 case BuiltinType::LongFract: 12236 case BuiltinType::SatLongFract: 12237 case BuiltinType::UShortFract: 12238 case BuiltinType::SatUShortFract: 12239 case BuiltinType::UFract: 12240 
case BuiltinType::SatUFract: 12241 case BuiltinType::ULongFract: 12242 case BuiltinType::SatULongFract: 12243 return 0; 12244 } 12245 } 12246 12247 llvm::FixedPointSemantics 12248 ASTContext::getFixedPointSemantics(QualType Ty) const { 12249 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 12250 "Can only get the fixed point semantics for a " 12251 "fixed point or integer type."); 12252 if (Ty->isIntegerType()) 12253 return llvm::FixedPointSemantics::GetIntegerSemantics( 12254 getIntWidth(Ty), Ty->isSignedIntegerType()); 12255 12256 bool isSigned = Ty->isSignedFixedPointType(); 12257 return llvm::FixedPointSemantics( 12258 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 12259 Ty->isSaturatedFixedPointType(), 12260 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 12261 } 12262 12263 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 12264 assert(Ty->isFixedPointType()); 12265 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 12266 } 12267 12268 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 12269 assert(Ty->isFixedPointType()); 12270 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 12271 } 12272 12273 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 12274 assert(Ty->isUnsignedFixedPointType() && 12275 "Expected unsigned fixed point type"); 12276 12277 switch (Ty->castAs<BuiltinType>()->getKind()) { 12278 case BuiltinType::UShortAccum: 12279 return ShortAccumTy; 12280 case BuiltinType::UAccum: 12281 return AccumTy; 12282 case BuiltinType::ULongAccum: 12283 return LongAccumTy; 12284 case BuiltinType::SatUShortAccum: 12285 return SatShortAccumTy; 12286 case BuiltinType::SatUAccum: 12287 return SatAccumTy; 12288 case BuiltinType::SatULongAccum: 12289 return SatLongAccumTy; 12290 case BuiltinType::UShortFract: 12291 return ShortFractTy; 12292 case BuiltinType::UFract: 12293 return FractTy; 12294 case BuiltinType::ULongFract: 12295 return LongFractTy; 12296 case BuiltinType::SatUShortFract: 12297 return SatShortFractTy; 12298 case BuiltinType::SatUFract: 12299 return SatFractTy; 12300 case BuiltinType::SatULongFract: 12301 return SatLongFractTy; 12302 default: 12303 llvm_unreachable("Unexpected unsigned fixed point type"); 12304 } 12305 } 12306 12307 ParsedTargetAttr 12308 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 12309 assert(TD != nullptr); 12310 ParsedTargetAttr ParsedAttr = TD->parse(); 12311 12312 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 12313 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 12314 }); 12315 return ParsedAttr; 12316 } 12317 12318 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 12319 const FunctionDecl *FD) const { 12320 if (FD) 12321 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 12322 else 12323 Target->initFeatureMap(FeatureMap, getDiagnostics(), 12324 Target->getTargetOpts().CPU, 12325 Target->getTargetOpts().Features); 12326 } 12327 12328 // Fills in the supplied string map with the set of target features for the 12329 // passed in function. 
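// For example (illustrative): for a function declared with
// __attribute__((target("avx2"))) on x86, the resulting map contains the
// command-line features plus "+avx2" and whatever features the target
// considers implied by it.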
12330 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 12331 GlobalDecl GD) const { 12332 StringRef TargetCPU = Target->getTargetOpts().CPU; 12333 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 12334 if (const auto *TD = FD->getAttr<TargetAttr>()) { 12335 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 12336 12337 // Copy the features passed on the command line to the beginning of the 12338 // function's additional features, so the attribute's features override them. 12339 ParsedAttr.Features.insert( 12340 ParsedAttr.Features.begin(), 12341 Target->getTargetOpts().FeaturesAsWritten.begin(), 12342 Target->getTargetOpts().FeaturesAsWritten.end()); 12343 12344 if (ParsedAttr.Architecture != "" && 12345 Target->isValidCPUName(ParsedAttr.Architecture)) 12346 TargetCPU = ParsedAttr.Architecture; 12347 12348 // Now populate the feature map, first with the TargetCPU which is either 12349 // the default or a new one from the target attribute string. Then we'll use 12350 // the passed-in features (FeaturesAsWritten) along with the new ones from 12351 // the attribute. 12352 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, 12353 ParsedAttr.Features); 12354 } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) { 12355 llvm::SmallVector<StringRef, 32> FeaturesTmp; 12356 Target->getCPUSpecificCPUDispatchFeatures( 12357 SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp); 12358 std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end()); 12359 Features.insert(Features.begin(), 12360 Target->getTargetOpts().FeaturesAsWritten.begin(), 12361 Target->getTargetOpts().FeaturesAsWritten.end()); 12362 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); 12363 } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) { 12364 std::vector<std::string> Features; 12365 StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); 12366 if (VersionStr.startswith("arch=")) 12367 TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1); 12368 else if (VersionStr != "default") 12369 Features.push_back((StringRef{"+"} + VersionStr).str()); 12370 12371 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); 12372 } else { 12373 FeatureMap = Target->getTargetOpts().FeatureMap; 12374 } 12375 } 12376 12377 OMPTraitInfo &ASTContext::getNewOMPTraitInfo() { 12378 OMPTraitInfoVector.emplace_back(new OMPTraitInfo()); 12379 return *OMPTraitInfoVector.back(); 12380 } 12381 12382 const StreamingDiagnostic &clang:: 12383 operator<<(const StreamingDiagnostic &DB, 12384 const ASTContext::SectionInfo &Section) { 12385 if (Section.Decl) 12386 return DB << Section.Decl; 12387 return DB << "a prior #pragma section"; 12388 } 12389 12390 bool ASTContext::mayExternalize(const Decl *D) const { 12391 bool IsStaticVar = 12392 isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static; 12393 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() && 12394 !D->getAttr<CUDADeviceAttr>()->isImplicit()) || 12395 (D->hasAttr<CUDAConstantAttr>() && 12396 !D->getAttr<CUDAConstantAttr>()->isImplicit()); 12397 // CUDA/HIP: static managed variables need to be externalized since they are 12398 // only declarations in IR and therefore cannot have internal linkage. Kernels 12399 // in an anonymous namespace need to be externalized to avoid duplicate symbols.
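// (For instance, a file-scope 'static __device__' variable that host code
// ODR-uses must be given a unique external name (see getCUIDHash() below)
// rather than internal linkage.)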
12400 return (IsStaticVar && 12401 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 12402 (D->hasAttr<CUDAGlobalAttr>() && 12403 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 12404 GVA_Internal); 12405 } 12406 12407 bool ASTContext::shouldExternalize(const Decl *D) const { 12408 return mayExternalize(D) && 12409 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 12410 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 12411 } 12412 12413 StringRef ASTContext::getCUIDHash() const { 12414 if (!CUIDHash.empty()) 12415 return CUIDHash; 12416 if (LangOpts.CUID.empty()) 12417 return StringRef(); 12418 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 12419 return CUIDHash; 12420 } 12421