//===- ASTContext.cpp - Context to hold long-lived AST nodes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank,
  Float128Rank
};

/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
static SourceLocation getDeclLocForCommentSearch(const Decl *D,
                                                 SourceManager &SourceMgr) {
  assert(D);

  // User cannot attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User cannot attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    return D->getBeginLoc();
  else {
    const SourceLocation DeclLoc = D->getLocation();
    if (DeclLoc.isMacroID()) {
      if (isa<TypedefDecl>(D)) {
        // If the location of the typedef name is in a macro, it is because it
        // is being declared via a macro. Try using the declaration's starting
        // location as the "declaration location".
        return D->getBeginLoc();
      } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
        // If the location of the tag decl is inside a macro, but the spelling
        // of the tag name comes from a macro argument, it looks like a special
        // macro like NS_ENUM is being used to define the tag decl.  In that
        // case, adjust the source location to the expansion loc so that we can
        // attach the comment to the tag decl.
        if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
            TD->isCompleteDefinition())
          return SourceMgr.getExpansionLoc(DeclLoc);
      }
    }
    return DeclLoc;
  }

  return {};
}

RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that the Doxygen trailing comment comes after the declaration,
      // starts on the same line and is in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract the text between the comment and the declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // the comment and the declaration.
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

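// For illustration (hypothetical declarations), the search above attaches
// comments roughly as follows when they are Doxygen-style comments (or when
// -fparse-all-comments is in effect):
//
//   int Field;  ///< A trailing comment starting on the same line attaches
//               ///< to 'Field'.
//
//   /// A preceding comment attaches to the next declaration as long as no
//   /// other declaration or preprocessor directive appears between them.
//   void frobnicate();
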
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return nullptr;

  if (ExternalSource && !CommentsLoaded) {
    ExternalSource->ReadComments();
    CommentsLoaded = true;
  }

  if (Comments.empty())
    return nullptr;

  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty())
    return nullptr;

  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if the function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

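// For illustration (hypothetical code): given
//
//   /// Frobnicates the widget.
//   template <typename T> void frob(T);
//
// a comment lookup on an implicit instantiation such as frob<int> is adjusted
// by adjustDeclToTemplate() to the FunctionTemplateDecl, so the documentation
// written on the template is found for the instantiation as well.
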
const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D, const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalid.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
    if (LookupRes != CommentlessRedeclChains.end())
      return LookupRes->second;
    return nullptr;
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared methods here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.

  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (DeclRawComments.count(D) > 0)
      continue;

    if (RawComment *const DocComment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
      cacheRawCommentForDecl(*D, *DocComment);
      comments::FullComment *FC = DocComment->parse(*this, PP, D);
      ParsedComments[D->getCanonicalDecl()] = FC;
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(), ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D, const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl(NonVirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(VirtualBase, PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl. This is
  // important because comments can contain references to parameter names which
  // can be different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      const TypeConstraint *TC = TTP->getTypeConstraint();
      ID.AddBoolean(TC != nullptr);
      if (TC)
        TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
                                                        /*Canonical=*/true);
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
  Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
  ID.AddBoolean(RequiresClause != nullptr);
  if (RequiresClause)
    RequiresClause->Profile(ID, C, /*Canonical=*/true);
}

static Expr *
canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
                                          QualType ConstrainedType) {
  // This is a bit ugly - we need to form a new immediately-declared
  // constraint that references the new parameter; this would ideally
  // require semantic analysis (e.g. template<C T> struct S {}; - the
  // converted arguments of C<T> could be an argument pack if C is
  // declared as template<typename... T> concept C = ...).
  // We don't have semantic analysis here so we dig deep into the
  // ready-made constraint expr and change the thing manually.
  ConceptSpecializationExpr *CSE;
  if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
    CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
  else
    CSE = cast<ConceptSpecializationExpr>(IDC);
  ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
  SmallVector<TemplateArgument, 3> NewConverted;
  NewConverted.reserve(OldConverted.size());
  if (OldConverted.front().getKind() == TemplateArgument::Pack) {
    // The case:
    //   template<typename... T> concept C = true;
    //   template<C<int> T> struct S; -> constraint is C<{T, int}>
    NewConverted.push_back(ConstrainedType);
    for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
      NewConverted.push_back(Arg);
    TemplateArgument NewPack(NewConverted);

    NewConverted.clear();
    NewConverted.push_back(NewPack);
    assert(OldConverted.size() == 1 &&
           "Template parameter pack should be the last parameter");
  } else {
    assert(OldConverted.front().getKind() == TemplateArgument::Type &&
           "Unexpected first argument kind for immediately-declared "
           "constraint");
    NewConverted.push_back(ConstrainedType);
    for (auto &Arg : OldConverted.drop_front(1))
      NewConverted.push_back(Arg);
  }
  Expr *NewIDC = ConceptSpecializationExpr::Create(
      C, CSE->getNamedConcept(), NewConverted, nullptr,
      CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());

  if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
    NewIDC = new (C) CXXFoldExpr(
        OrigFold->getType(), /*Callee=*/nullptr, SourceLocation(), NewIDC,
        BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
        SourceLocation(), /*NumExpansions=*/None);
  return NewIDC;
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), TTP->hasTypeConstraint(),
          TTP->isExpandedParameterPack()
              ? llvm::Optional<unsigned>(TTP->getNumExpansionParameters())
              : None);
      if (const auto *TC = TTP->getTypeConstraint()) {
        QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
        Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
            *this, TC->getImmediatelyDeclaredConstraint(), ParamAsArgument);
        TemplateArgumentListInfo CanonArgsAsWritten;
        if (auto *Args = TC->getTemplateArgsAsWritten())
          for (const auto &ArgLoc : Args->arguments())
            CanonArgsAsWritten.addArgument(
                TemplateArgumentLoc(ArgLoc.getArgument(),
                                    TemplateArgumentLocInfo()));
        NewTTP->setTypeConstraint(
            NestedNameSpecifierLoc(),
            DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
                                SourceLocation()), /*FoundDecl=*/nullptr,
            // Actually canonicalizing a TemplateArgumentLoc is difficult so we
            // simply omit the ArgsAsWritten.
            TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
      }
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      if (AutoType *AT = T->getContainedAutoType()) {
        if (AT->isConstrained()) {
          Param->setPlaceholderTypeConstraint(
              canonicalizeImmediatelyDeclaredConstraint(
                  *this, NTTP->getPlaceholderTypeConstraint(), T));
        }
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  Expr *CanonRequiresClause = nullptr;
  if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
    CanonRequiresClause = RequiresClause;

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       nullptr,
                                       TemplateParameterList::Create(
                                           *this, SourceLocation(),
                                           SourceLocation(), CanonParams,
                                           SourceLocation(),
                                           CanonRequiresClause));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.getValueOr(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
                                           const LangOptions &LOpts) {
  if (LOpts.FakeAddressSpaceMap) {
    // The fake address space map must have a distinct entry for each
    // language-specific address space.
    static const unsigned FakeAddrSpaceMap[] = {
        0,  // Default
        1,  // opencl_global
        3,  // opencl_local
        2,  // opencl_constant
        0,  // opencl_private
        4,  // opencl_generic
        5,  // opencl_global_device
        6,  // opencl_global_host
        7,  // cuda_device
        8,  // cuda_constant
        9,  // cuda_shared
        1,  // sycl_global
        5,  // sycl_global_device
        6,  // sycl_global_host
        3,  // sycl_local
        0,  // sycl_private
        10, // ptr32_sptr
        11, // ptr32_uptr
        12  // ptr64
    };
    return &FakeAddrSpaceMap;
  } else {
    return &T.getAddressSpaceMap();
  }
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

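// For illustration: with -ffake-address-space-map the table above is used, so
// a hypothetical OpenCL declaration such as
//
//   __global int *P;
//
// is treated as pointing into address space 1, __local into 3 and __constant
// into 2, independently of what the real target's address space map says.
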
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

ASTContext::~ASTContext() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                        AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
}

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
      0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                                    \
  if (counts[Idx])                                                            \
    llvm::errs() << "    " << counts[Idx] << " " << #Name                     \
                 << " types, " << sizeof(Name##Type) << " each "              \
                 << "(" << counts[Idx] * sizeof(Name##Type)                   \
                 << " bytes)\n";                                              \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                             \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr),
               Merged.end());
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return None;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return None;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision.
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // C11 extension ISO/IEC TS 18661-3.
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 extension (fixed-point types).
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder types for OpenMP array sections, array shaping and iterators.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // C99 6.2.5p11.
  FloatComplexTy = getComplexType(FloatTy);
  DoubleComplexTy = getComplexType(DoubleTy);
  LongDoubleComplexTy = getComplexType(LongDoubleTy);
  Float128ComplexTy = getComplexType(Float128Ty);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                  \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                     \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId)                                       \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64() &&
      Target.hasFeature("paired-vector-memops")) {
    if (Target.hasFeature("mma")) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size)                                   \
      InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
    }
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size)                                   \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                       \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no.
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

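// Illustrative note: attributes live in the DeclAttrs side table above rather
// than inside Decl itself; Decl::attrs() and Decl::addAttr() are expected to
// bottom out in getDeclAttrs(), e.g. (hypothetical use)
//
//   AttrVec &Attrs = Ctx.getDeclAttrs(D); // vector is created lazily
//
// while eraseDeclAttrs() drops the whole vector for a declaration.
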
// FIXME: Remove?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                                SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  auto Pos = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
  if (Pos == InstantiatedFromUsingEnumDecl.end())
    return nullptr;

  return Pos->second;
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

InstantiatedFromUnnamedFieldDecl.end()) 1610 return nullptr; 1611 1612 return Pos->second; 1613 } 1614 1615 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1616 FieldDecl *Tmpl) { 1617 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1618 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1619 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1620 "Already noted what unnamed field was instantiated from"); 1621 1622 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1623 } 1624 1625 ASTContext::overridden_cxx_method_iterator 1626 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1627 return overridden_methods(Method).begin(); 1628 } 1629 1630 ASTContext::overridden_cxx_method_iterator 1631 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1632 return overridden_methods(Method).end(); 1633 } 1634 1635 unsigned 1636 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1637 auto Range = overridden_methods(Method); 1638 return Range.end() - Range.begin(); 1639 } 1640 1641 ASTContext::overridden_method_range 1642 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1643 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1644 OverriddenMethods.find(Method->getCanonicalDecl()); 1645 if (Pos == OverriddenMethods.end()) 1646 return overridden_method_range(nullptr, nullptr); 1647 return overridden_method_range(Pos->second.begin(), Pos->second.end()); 1648 } 1649 1650 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1651 const CXXMethodDecl *Overridden) { 1652 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1653 OverriddenMethods[Method].push_back(Overridden); 1654 } 1655 1656 void ASTContext::getOverriddenMethods( 1657 const NamedDecl *D, 1658 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1659 assert(D); 1660 1661 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1662 Overridden.append(overridden_methods_begin(CXXMethod), 1663 overridden_methods_end(CXXMethod)); 1664 return; 1665 } 1666 1667 const auto *Method = dyn_cast<ObjCMethodDecl>(D); 1668 if (!Method) 1669 return; 1670 1671 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1672 Method->getOverriddenMethods(OverDecls); 1673 Overridden.append(OverDecls.begin(), OverDecls.end()); 1674 } 1675 1676 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1677 assert(!Import->getNextLocalImport() && 1678 "Import declaration already in the chain"); 1679 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1680 if (!FirstLocalImport) { 1681 FirstLocalImport = Import; 1682 LastLocalImport = Import; 1683 return; 1684 } 1685 1686 LastLocalImport->setNextLocalImport(Import); 1687 LastLocalImport = Import; 1688 } 1689 1690 //===----------------------------------------------------------------------===// 1691 // Type Sizing and Analysis 1692 //===----------------------------------------------------------------------===// 1693 1694 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1695 /// scalar floating point type. 
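/// For instance, getFloatTypeSemantics(FloatTy) normally yields
/// llvm::APFloat::IEEEsingle() and getFloatTypeSemantics(DoubleTy)
/// llvm::APFloat::IEEEdouble(); the semantics of long double and __float128
/// are target-dependent (and, under OpenMP device compilation, may come from
/// the host AuxTarget, as handled below).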
1696 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { 1697 switch (T->castAs<BuiltinType>()->getKind()) { 1698 default: 1699 llvm_unreachable("Not a floating point type!"); 1700 case BuiltinType::BFloat16: 1701 return Target->getBFloat16Format(); 1702 case BuiltinType::Float16: 1703 case BuiltinType::Half: 1704 return Target->getHalfFormat(); 1705 case BuiltinType::Float: return Target->getFloatFormat(); 1706 case BuiltinType::Double: return Target->getDoubleFormat(); 1707 case BuiltinType::LongDouble: 1708 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1709 return AuxTarget->getLongDoubleFormat(); 1710 return Target->getLongDoubleFormat(); 1711 case BuiltinType::Float128: 1712 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1713 return AuxTarget->getFloat128Format(); 1714 return Target->getFloat128Format(); 1715 } 1716 } 1717 1718 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { 1719 unsigned Align = Target->getCharWidth(); 1720 1721 bool UseAlignAttrOnly = false; 1722 if (unsigned AlignFromAttr = D->getMaxAlignment()) { 1723 Align = AlignFromAttr; 1724 1725 // __attribute__((aligned)) can increase or decrease alignment 1726 // *except* on a struct or struct member, where it only increases 1727 // alignment unless 'packed' is also specified. 1728 // 1729 // It is an error for alignas to decrease alignment, so we can 1730 // ignore that possibility; Sema should diagnose it. 1731 if (isa<FieldDecl>(D)) { 1732 UseAlignAttrOnly = D->hasAttr<PackedAttr>() || 1733 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1734 } else { 1735 UseAlignAttrOnly = true; 1736 } 1737 } 1738 else if (isa<FieldDecl>(D)) 1739 UseAlignAttrOnly = 1740 D->hasAttr<PackedAttr>() || 1741 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1742 1743 // If we're using the align attribute only, just ignore everything 1744 // else about the declaration and its type. 1745 if (UseAlignAttrOnly) { 1746 // do nothing 1747 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) { 1748 QualType T = VD->getType(); 1749 if (const auto *RT = T->getAs<ReferenceType>()) { 1750 if (ForAlignof) 1751 T = RT->getPointeeType(); 1752 else 1753 T = getPointerType(RT->getPointeeType()); 1754 } 1755 QualType BaseT = getBaseElementType(T); 1756 if (T->isFunctionType()) 1757 Align = getTypeInfoImpl(T.getTypePtr()).Align; 1758 else if (!BaseT->isIncompleteType()) { 1759 // Adjust alignments of declarations with array type by the 1760 // large-array alignment on the target. 
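      // (For example, x86 targets typically report a large-array minimum width
      // and alignment of 128 bits, so a file-scope "char Buf[64];" would be
      // bumped to 16-byte alignment by the check below.)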
1761 if (const ArrayType *arrayType = getAsArrayType(T)) { 1762 unsigned MinWidth = Target->getLargeArrayMinWidth(); 1763 if (!ForAlignof && MinWidth) { 1764 if (isa<VariableArrayType>(arrayType)) 1765 Align = std::max(Align, Target->getLargeArrayAlign()); 1766 else if (isa<ConstantArrayType>(arrayType) && 1767 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType))) 1768 Align = std::max(Align, Target->getLargeArrayAlign()); 1769 } 1770 } 1771 Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); 1772 if (BaseT.getQualifiers().hasUnaligned()) 1773 Align = Target->getCharWidth(); 1774 if (const auto *VD = dyn_cast<VarDecl>(D)) { 1775 if (VD->hasGlobalStorage() && !ForAlignof) { 1776 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 1777 Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); 1778 } 1779 } 1780 } 1781 1782 // Fields can be subject to extra alignment constraints, for example if 1783 // the field is packed, the struct is packed, or the struct has a 1784 // max-field-alignment constraint (#pragma pack). So calculate the 1785 // actual alignment of the field within the struct, and then 1786 // (as we're expected to) constrain that by the alignment of the type. 1787 if (const auto *Field = dyn_cast<FieldDecl>(VD)) { 1788 const RecordDecl *Parent = Field->getParent(); 1789 // We can only produce a sensible answer if the record is valid. 1790 if (!Parent->isInvalidDecl()) { 1791 const ASTRecordLayout &Layout = getASTRecordLayout(Parent); 1792 1793 // Start with the record's overall alignment. 1794 unsigned FieldAlign = toBits(Layout.getAlignment()); 1795 1796 // Use the GCD of that and the offset within the record. 1797 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex()); 1798 if (Offset > 0) { 1799 // Alignment is always a power of 2, so the GCD will be a power of 2, 1800 // which means we get to do this crazy thing instead of Euclid's. (The GCD of a power of 2 and Offset is simply Offset's lowest set bit, as long as that bit is below the power of 2.) 1801 uint64_t LowBitOfOffset = Offset & (~Offset + 1); 1802 if (LowBitOfOffset < FieldAlign) 1803 FieldAlign = static_cast<unsigned>(LowBitOfOffset); 1804 } 1805 1806 Align = std::min(Align, FieldAlign); 1807 } 1808 } 1809 } 1810 1811 // Some targets place a hard limit on the maximum alignment that can be 1812 // requested with the aligned attribute on static variables. 1813 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute(); 1814 const auto *VD = dyn_cast<VarDecl>(D); 1815 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static) 1816 Align = std::min(Align, MaxAlignedAttr); 1817 1818 return toCharUnitsFromBits(Align); 1819 } 1820 1821 CharUnits ASTContext::getExnObjectAlignment() const { 1822 return toCharUnitsFromBits(Target->getExnObjectAlignment()); 1823 } 1824 1825 // getTypeInfoDataSizeInChars - Return the size of a type, in 1826 // chars. If the type is a record, its data size is returned. This is 1827 // the size of the memcpy that's performed when assigning this type 1828 // using a trivial copy/move assignment operator. 1829 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1830 TypeInfoChars Info = getTypeInfoInChars(T); 1831 1832 // In C++, objects can sometimes be allocated into the tail padding 1833 // of a base-class subobject. We decide whether that's possible 1834 // during class layout, so here we can just trust the layout results.
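  // For example, for a hypothetical non-POD base class on an Itanium-ABI
  // target:
  //   struct B { B(); int i; char c; };  // sizeof(B) == 8, data size == 5
  //   struct D : B { char d; };          // D::d may be placed at offset 5
  // a trivial assignment to the B subobject must copy only the 5-byte data
  // size so that it does not clobber D::d living in B's tail padding.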
1835 if (getLangOpts().CPlusPlus) { 1836 if (const auto *RT = T->getAs<RecordType>()) { 1837 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1838 Info.Width = layout.getDataSize(); 1839 } 1840 } 1841 1842 return Info; 1843 } 1844 1845 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1846 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1847 TypeInfoChars 1848 static getConstantArrayInfoInChars(const ASTContext &Context, 1849 const ConstantArrayType *CAT) { 1850 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1851 uint64_t Size = CAT->getSize().getZExtValue(); 1852 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1853 (uint64_t)(-1)/Size) && 1854 "Overflow in array type char size evaluation"); 1855 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1856 unsigned Align = EltInfo.Align.getQuantity(); 1857 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1858 Context.getTargetInfo().getPointerWidth(0) == 64) 1859 Width = llvm::alignTo(Width, Align); 1860 return TypeInfoChars(CharUnits::fromQuantity(Width), 1861 CharUnits::fromQuantity(Align), 1862 EltInfo.AlignIsRequired); 1863 } 1864 1865 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1866 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1867 return getConstantArrayInfoInChars(*this, CAT); 1868 TypeInfo Info = getTypeInfo(T); 1869 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1870 toCharUnitsFromBits(Info.Align), 1871 Info.AlignIsRequired); 1872 } 1873 1874 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1875 return getTypeInfoInChars(T.getTypePtr()); 1876 } 1877 1878 bool ASTContext::isAlignmentRequired(const Type *T) const { 1879 return getTypeInfo(T).AlignIsRequired; 1880 } 1881 1882 bool ASTContext::isAlignmentRequired(QualType T) const { 1883 return isAlignmentRequired(T.getTypePtr()); 1884 } 1885 1886 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1887 bool NeedsPreferredAlignment) const { 1888 // An alignment on a typedef overrides anything else. 1889 if (const auto *TT = T->getAs<TypedefType>()) 1890 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1891 return Align; 1892 1893 // If we have an (array of) complete type, we're done. 1894 T = getBaseElementType(T); 1895 if (!T->isIncompleteType()) 1896 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1897 1898 // If we had an array type, its element type might be a typedef 1899 // type with an alignment attribute. 1900 if (const auto *TT = T->getAs<TypedefType>()) 1901 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1902 return Align; 1903 1904 // Otherwise, see if the declaration of the type had an attribute. 1905 if (const auto *TT = T->getAs<TagType>()) 1906 return TT->getDecl()->getMaxAlignment(); 1907 1908 return 0; 1909 } 1910 1911 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1912 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1913 if (I != MemoizedTypeInfo.end()) 1914 return I->second; 1915 1916 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1917 TypeInfo TI = getTypeInfoImpl(T); 1918 MemoizedTypeInfo[T] = TI; 1919 return TI; 1920 } 1921 1922 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1923 /// method does not work on incomplete types. 
1924 /// 1925 /// FIXME: Pointers into different addr spaces could have different sizes and 1926 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1927 /// should take a QualType, &c. 1928 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1929 uint64_t Width = 0; 1930 unsigned Align = 8; 1931 bool AlignIsRequired = false; 1932 unsigned AS = 0; 1933 switch (T->getTypeClass()) { 1934 #define TYPE(Class, Base) 1935 #define ABSTRACT_TYPE(Class, Base) 1936 #define NON_CANONICAL_TYPE(Class, Base) 1937 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1938 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1939 case Type::Class: \ 1940 assert(!T->isDependentType() && "should not see dependent types here"); \ 1941 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1942 #include "clang/AST/TypeNodes.inc" 1943 llvm_unreachable("Should not see dependent types"); 1944 1945 case Type::FunctionNoProto: 1946 case Type::FunctionProto: 1947 // GCC extension: alignof(function) = 32 bits 1948 Width = 0; 1949 Align = 32; 1950 break; 1951 1952 case Type::IncompleteArray: 1953 case Type::VariableArray: 1954 case Type::ConstantArray: { 1955 // Model non-constant sized arrays as size zero, but track the alignment. 1956 uint64_t Size = 0; 1957 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1958 Size = CAT->getSize().getZExtValue(); 1959 1960 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1961 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1962 "Overflow in array type bit size evaluation"); 1963 Width = EltInfo.Width * Size; 1964 Align = EltInfo.Align; 1965 AlignIsRequired = EltInfo.AlignIsRequired; 1966 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1967 getTargetInfo().getPointerWidth(0) == 64) 1968 Width = llvm::alignTo(Width, Align); 1969 break; 1970 } 1971 1972 case Type::ExtVector: 1973 case Type::Vector: { 1974 const auto *VT = cast<VectorType>(T); 1975 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1976 Width = EltInfo.Width * VT->getNumElements(); 1977 Align = Width; 1978 // If the alignment is not a power of 2, round up to the next power of 2. 1979 // This happens for non-power-of-2 length vectors. 1980 if (Align & (Align-1)) { 1981 Align = llvm::NextPowerOf2(Align); 1982 Width = llvm::alignTo(Width, Align); 1983 } 1984 // Adjust the alignment based on the target max. 1985 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1986 if (TargetVectorAlign && TargetVectorAlign < Align) 1987 Align = TargetVectorAlign; 1988 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 1989 // Adjust the alignment for fixed-length SVE vectors. This is important 1990 // for non-power-of-2 vector lengths. 1991 Align = 128; 1992 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 1993 // Adjust the alignment for fixed-length SVE predicates. 1994 Align = 16; 1995 break; 1996 } 1997 1998 case Type::ConstantMatrix: { 1999 const auto *MT = cast<ConstantMatrixType>(T); 2000 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 2001 // The internal layout of a matrix value is implementation defined. 2002 // Initially be ABI compatible with arrays with respect to alignment and 2003 // size. 
2004 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 2005 Align = ElementInfo.Align; 2006 break; 2007 } 2008 2009 case Type::Builtin: 2010 switch (cast<BuiltinType>(T)->getKind()) { 2011 default: llvm_unreachable("Unknown builtin type!"); 2012 case BuiltinType::Void: 2013 // GCC extension: alignof(void) = 8 bits. 2014 Width = 0; 2015 Align = 8; 2016 break; 2017 case BuiltinType::Bool: 2018 Width = Target->getBoolWidth(); 2019 Align = Target->getBoolAlign(); 2020 break; 2021 case BuiltinType::Char_S: 2022 case BuiltinType::Char_U: 2023 case BuiltinType::UChar: 2024 case BuiltinType::SChar: 2025 case BuiltinType::Char8: 2026 Width = Target->getCharWidth(); 2027 Align = Target->getCharAlign(); 2028 break; 2029 case BuiltinType::WChar_S: 2030 case BuiltinType::WChar_U: 2031 Width = Target->getWCharWidth(); 2032 Align = Target->getWCharAlign(); 2033 break; 2034 case BuiltinType::Char16: 2035 Width = Target->getChar16Width(); 2036 Align = Target->getChar16Align(); 2037 break; 2038 case BuiltinType::Char32: 2039 Width = Target->getChar32Width(); 2040 Align = Target->getChar32Align(); 2041 break; 2042 case BuiltinType::UShort: 2043 case BuiltinType::Short: 2044 Width = Target->getShortWidth(); 2045 Align = Target->getShortAlign(); 2046 break; 2047 case BuiltinType::UInt: 2048 case BuiltinType::Int: 2049 Width = Target->getIntWidth(); 2050 Align = Target->getIntAlign(); 2051 break; 2052 case BuiltinType::ULong: 2053 case BuiltinType::Long: 2054 Width = Target->getLongWidth(); 2055 Align = Target->getLongAlign(); 2056 break; 2057 case BuiltinType::ULongLong: 2058 case BuiltinType::LongLong: 2059 Width = Target->getLongLongWidth(); 2060 Align = Target->getLongLongAlign(); 2061 break; 2062 case BuiltinType::Int128: 2063 case BuiltinType::UInt128: 2064 Width = 128; 2065 Align = 128; // int128_t is 128-bit aligned on all targets. 
2066 break; 2067 case BuiltinType::ShortAccum: 2068 case BuiltinType::UShortAccum: 2069 case BuiltinType::SatShortAccum: 2070 case BuiltinType::SatUShortAccum: 2071 Width = Target->getShortAccumWidth(); 2072 Align = Target->getShortAccumAlign(); 2073 break; 2074 case BuiltinType::Accum: 2075 case BuiltinType::UAccum: 2076 case BuiltinType::SatAccum: 2077 case BuiltinType::SatUAccum: 2078 Width = Target->getAccumWidth(); 2079 Align = Target->getAccumAlign(); 2080 break; 2081 case BuiltinType::LongAccum: 2082 case BuiltinType::ULongAccum: 2083 case BuiltinType::SatLongAccum: 2084 case BuiltinType::SatULongAccum: 2085 Width = Target->getLongAccumWidth(); 2086 Align = Target->getLongAccumAlign(); 2087 break; 2088 case BuiltinType::ShortFract: 2089 case BuiltinType::UShortFract: 2090 case BuiltinType::SatShortFract: 2091 case BuiltinType::SatUShortFract: 2092 Width = Target->getShortFractWidth(); 2093 Align = Target->getShortFractAlign(); 2094 break; 2095 case BuiltinType::Fract: 2096 case BuiltinType::UFract: 2097 case BuiltinType::SatFract: 2098 case BuiltinType::SatUFract: 2099 Width = Target->getFractWidth(); 2100 Align = Target->getFractAlign(); 2101 break; 2102 case BuiltinType::LongFract: 2103 case BuiltinType::ULongFract: 2104 case BuiltinType::SatLongFract: 2105 case BuiltinType::SatULongFract: 2106 Width = Target->getLongFractWidth(); 2107 Align = Target->getLongFractAlign(); 2108 break; 2109 case BuiltinType::BFloat16: 2110 Width = Target->getBFloat16Width(); 2111 Align = Target->getBFloat16Align(); 2112 break; 2113 case BuiltinType::Float16: 2114 case BuiltinType::Half: 2115 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2116 !getLangOpts().OpenMPIsDevice) { 2117 Width = Target->getHalfWidth(); 2118 Align = Target->getHalfAlign(); 2119 } else { 2120 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2121 "Expected OpenMP device compilation."); 2122 Width = AuxTarget->getHalfWidth(); 2123 Align = AuxTarget->getHalfAlign(); 2124 } 2125 break; 2126 case BuiltinType::Float: 2127 Width = Target->getFloatWidth(); 2128 Align = Target->getFloatAlign(); 2129 break; 2130 case BuiltinType::Double: 2131 Width = Target->getDoubleWidth(); 2132 Align = Target->getDoubleAlign(); 2133 break; 2134 case BuiltinType::LongDouble: 2135 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2136 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2137 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2138 Width = AuxTarget->getLongDoubleWidth(); 2139 Align = AuxTarget->getLongDoubleAlign(); 2140 } else { 2141 Width = Target->getLongDoubleWidth(); 2142 Align = Target->getLongDoubleAlign(); 2143 } 2144 break; 2145 case BuiltinType::Float128: 2146 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2147 !getLangOpts().OpenMPIsDevice) { 2148 Width = Target->getFloat128Width(); 2149 Align = Target->getFloat128Align(); 2150 } else { 2151 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2152 "Expected OpenMP device compilation."); 2153 Width = AuxTarget->getFloat128Width(); 2154 Align = AuxTarget->getFloat128Align(); 2155 } 2156 break; 2157 case BuiltinType::NullPtr: 2158 Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) 2159 Align = Target->getPointerAlign(0); // == sizeof(void*) 2160 break; 2161 case BuiltinType::ObjCId: 2162 case BuiltinType::ObjCClass: 2163 case BuiltinType::ObjCSel: 2164 Width = Target->getPointerWidth(0); 2165 Align = Target->getPointerAlign(0); 2166 break; 2167 case 
BuiltinType::OCLSampler: 2168 case BuiltinType::OCLEvent: 2169 case BuiltinType::OCLClkEvent: 2170 case BuiltinType::OCLQueue: 2171 case BuiltinType::OCLReserveID: 2172 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2173 case BuiltinType::Id: 2174 #include "clang/Basic/OpenCLImageTypes.def" 2175 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2176 case BuiltinType::Id: 2177 #include "clang/Basic/OpenCLExtensionTypes.def" 2178 AS = getTargetAddressSpace( 2179 Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); 2180 Width = Target->getPointerWidth(AS); 2181 Align = Target->getPointerAlign(AS); 2182 break; 2183 // The SVE types are effectively target-specific. The length of an 2184 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2185 // of 128 bits. There is one predicate bit for each vector byte, so the 2186 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2187 // 2188 // Because the length is only known at runtime, we use a dummy value 2189 // of 0 for the static length. The alignment values are those defined 2190 // by the Procedure Call Standard for the Arm Architecture. 2191 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2192 IsSigned, IsFP, IsBF) \ 2193 case BuiltinType::Id: \ 2194 Width = 0; \ 2195 Align = 128; \ 2196 break; 2197 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2198 case BuiltinType::Id: \ 2199 Width = 0; \ 2200 Align = 16; \ 2201 break; 2202 #include "clang/Basic/AArch64SVEACLETypes.def" 2203 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2204 case BuiltinType::Id: \ 2205 Width = Size; \ 2206 Align = Size; \ 2207 break; 2208 #include "clang/Basic/PPCTypes.def" 2209 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2210 IsFP) \ 2211 case BuiltinType::Id: \ 2212 Width = 0; \ 2213 Align = ElBits; \ 2214 break; 2215 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2216 case BuiltinType::Id: \ 2217 Width = 0; \ 2218 Align = 8; \ 2219 break; 2220 #include "clang/Basic/RISCVVTypes.def" 2221 } 2222 break; 2223 case Type::ObjCObjectPointer: 2224 Width = Target->getPointerWidth(0); 2225 Align = Target->getPointerAlign(0); 2226 break; 2227 case Type::BlockPointer: 2228 AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType()); 2229 Width = Target->getPointerWidth(AS); 2230 Align = Target->getPointerAlign(AS); 2231 break; 2232 case Type::LValueReference: 2233 case Type::RValueReference: 2234 // alignof and sizeof should never enter this code path here, so we go 2235 // the pointer route. 2236 AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType()); 2237 Width = Target->getPointerWidth(AS); 2238 Align = Target->getPointerAlign(AS); 2239 break; 2240 case Type::Pointer: 2241 AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); 2242 Width = Target->getPointerWidth(AS); 2243 Align = Target->getPointerAlign(AS); 2244 break; 2245 case Type::MemberPointer: { 2246 const auto *MPT = cast<MemberPointerType>(T); 2247 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2248 Width = MPI.Width; 2249 Align = MPI.Align; 2250 break; 2251 } 2252 case Type::Complex: { 2253 // Complex types have the same alignment as their elements, but twice the 2254 // size. 
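    // For example, _Complex double is typically 128 bits wide but keeps the
    // 64-bit alignment of double.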
2255 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2256 Width = EltInfo.Width * 2; 2257 Align = EltInfo.Align; 2258 break; 2259 } 2260 case Type::ObjCObject: 2261 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2262 case Type::Adjusted: 2263 case Type::Decayed: 2264 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2265 case Type::ObjCInterface: { 2266 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2267 if (ObjCI->getDecl()->isInvalidDecl()) { 2268 Width = 8; 2269 Align = 8; 2270 break; 2271 } 2272 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2273 Width = toBits(Layout.getSize()); 2274 Align = toBits(Layout.getAlignment()); 2275 break; 2276 } 2277 case Type::ExtInt: { 2278 const auto *EIT = cast<ExtIntType>(T); 2279 Align = 2280 std::min(static_cast<unsigned>(std::max( 2281 getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), 2282 Target->getLongLongAlign()); 2283 Width = llvm::alignTo(EIT->getNumBits(), Align); 2284 break; 2285 } 2286 case Type::Record: 2287 case Type::Enum: { 2288 const auto *TT = cast<TagType>(T); 2289 2290 if (TT->getDecl()->isInvalidDecl()) { 2291 Width = 8; 2292 Align = 8; 2293 break; 2294 } 2295 2296 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2297 const EnumDecl *ED = ET->getDecl(); 2298 TypeInfo Info = 2299 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2300 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2301 Info.Align = AttrAlign; 2302 Info.AlignIsRequired = true; 2303 } 2304 return Info; 2305 } 2306 2307 const auto *RT = cast<RecordType>(TT); 2308 const RecordDecl *RD = RT->getDecl(); 2309 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2310 Width = toBits(Layout.getSize()); 2311 Align = toBits(Layout.getAlignment()); 2312 AlignIsRequired = RD->hasAttr<AlignedAttr>(); 2313 break; 2314 } 2315 2316 case Type::SubstTemplateTypeParm: 2317 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2318 getReplacementType().getTypePtr()); 2319 2320 case Type::Auto: 2321 case Type::DeducedTemplateSpecialization: { 2322 const auto *A = cast<DeducedType>(T); 2323 assert(!A->getDeducedType().isNull() && 2324 "cannot request the size of an undeduced or dependent auto type"); 2325 return getTypeInfo(A->getDeducedType().getTypePtr()); 2326 } 2327 2328 case Type::Paren: 2329 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2330 2331 case Type::MacroQualified: 2332 return getTypeInfo( 2333 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2334 2335 case Type::ObjCTypeParam: 2336 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2337 2338 case Type::Typedef: { 2339 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); 2340 TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); 2341 // If the typedef has an aligned attribute on it, it overrides any computed 2342 // alignment we have. This violates the GCC documentation (which says that 2343 // attribute(aligned) can only round up) but matches its implementation. 
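    // For example (illustrative): given
    //   typedef long long LL2 __attribute__((aligned(2)));
    // alignof(LL2) evaluates to 2 here, even though that rounds the natural
    // alignment down.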
2344 if (unsigned AttrAlign = Typedef->getMaxAlignment()) { 2345 Align = AttrAlign; 2346 AlignIsRequired = true; 2347 } else { 2348 Align = Info.Align; 2349 AlignIsRequired = Info.AlignIsRequired; 2350 } 2351 Width = Info.Width; 2352 break; 2353 } 2354 2355 case Type::Elaborated: 2356 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr()); 2357 2358 case Type::Attributed: 2359 return getTypeInfo( 2360 cast<AttributedType>(T)->getEquivalentType().getTypePtr()); 2361 2362 case Type::Atomic: { 2363 // Start with the base type information. 2364 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType()); 2365 Width = Info.Width; 2366 Align = Info.Align; 2367 2368 if (!Width) { 2369 // An otherwise zero-sized type should still generate an 2370 // atomic operation. 2371 Width = Target->getCharWidth(); 2372 assert(Align); 2373 } else if (Width <= Target->getMaxAtomicPromoteWidth()) { 2374 // If the size of the type doesn't exceed the platform's max 2375 // atomic promotion width, make the size and alignment more 2376 // favorable to atomic operations: 2377 2378 // Round the size up to a power of 2. 2379 if (!llvm::isPowerOf2_64(Width)) 2380 Width = llvm::NextPowerOf2(Width); 2381 2382 // Set the alignment equal to the size. 2383 Align = static_cast<unsigned>(Width); 2384 } 2385 } 2386 break; 2387 2388 case Type::Pipe: 2389 Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global)); 2390 Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global)); 2391 break; 2392 } 2393 2394 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); 2395 return TypeInfo(Width, Align, AlignIsRequired); 2396 } 2397 2398 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const { 2399 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T); 2400 if (I != MemoizedUnadjustedAlign.end()) 2401 return I->second; 2402 2403 unsigned UnadjustedAlign; 2404 if (const auto *RT = T->getAs<RecordType>()) { 2405 const RecordDecl *RD = RT->getDecl(); 2406 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2407 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment()); 2408 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) { 2409 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2410 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment()); 2411 } else { 2412 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType()); 2413 } 2414 2415 MemoizedUnadjustedAlign[T] = UnadjustedAlign; 2416 return UnadjustedAlign; 2417 } 2418 2419 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const { 2420 unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign(); 2421 return SimdAlign; 2422 } 2423 2424 /// toCharUnitsFromBits - Convert a size in bits to a size in characters. 2425 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const { 2426 return CharUnits::fromQuantity(BitSize / getCharWidth()); 2427 } 2428 2429 /// toBits - Convert a size in characters to a size in bits. 2430 int64_t ASTContext::toBits(CharUnits CharSize) const { 2431 return CharSize.getQuantity() * getCharWidth(); 2432 } 2433 2434 /// getTypeSizeInChars - Return the size of the specified type, in characters. 2435 /// This method does not work on incomplete types.
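/// For example, on a typical 64-bit target, getTypeSizeInChars(VoidPtrTy)
/// yields CharUnits::fromQuantity(8).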
2436 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2437 return getTypeInfoInChars(T).Width; 2438 } 2439 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2440 return getTypeInfoInChars(T).Width; 2441 } 2442 2443 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2444 /// characters. This method does not work on incomplete types. 2445 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2446 return toCharUnitsFromBits(getTypeAlign(T)); 2447 } 2448 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2449 return toCharUnitsFromBits(getTypeAlign(T)); 2450 } 2451 2452 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2453 /// type, in characters, before alignment adjustments. This method does 2454 /// not work on incomplete types. 2455 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2456 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2457 } 2458 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2459 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2460 } 2461 2462 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2463 /// type for the current target, in bits. This can differ from the ABI 2464 /// alignment when it is beneficial for performance, or for preserving 2465 /// backwards compatibility, to overalign a data type. (Note: despite the name, 2466 /// the preferred alignment is ABI-impacting, and not an optimization.) 2467 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { 2468 TypeInfo TI = getTypeInfo(T); 2469 unsigned ABIAlign = TI.Align; 2470 2471 T = T->getBaseElementTypeUnsafe(); 2472 2473 // The preferred alignment of member pointers is that of a pointer. 2474 if (T->isMemberPointerType()) 2475 return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); 2476 2477 if (!Target->allowsLargerPreferedTypeAlignment()) 2478 return ABIAlign; 2479 2480 if (const auto *RT = T->getAs<RecordType>()) { 2481 if (TI.AlignIsRequired || RT->getDecl()->isInvalidDecl()) 2482 return ABIAlign; 2483 2484 unsigned PreferredAlign = static_cast<unsigned>( 2485 toBits(getASTRecordLayout(RT->getDecl()).PreferredAlignment)); 2486 assert(PreferredAlign >= ABIAlign && 2487 "PreferredAlign should be at least as large as ABIAlign."); 2488 return PreferredAlign; 2489 } 2490 2491 // Double (and, for targets supporting AIX `power` alignment, long double) and 2492 // long long should be naturally aligned (despite requiring less alignment) if 2493 // possible. 2494 if (const auto *CT = T->getAs<ComplexType>()) 2495 T = CT->getElementType().getTypePtr(); 2496 if (const auto *ET = T->getAs<EnumType>()) 2497 T = ET->getDecl()->getIntegerType().getTypePtr(); 2498 if (T->isSpecificBuiltinType(BuiltinType::Double) || 2499 T->isSpecificBuiltinType(BuiltinType::LongLong) || 2500 T->isSpecificBuiltinType(BuiltinType::ULongLong) || 2501 (T->isSpecificBuiltinType(BuiltinType::LongDouble) && 2502 Target->defaultsToAIXPowerAlignment())) 2503 // Don't increase the alignment if an alignment attribute was specified on a 2504 // typedef declaration. 2505 if (!TI.AlignIsRequired) 2506 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2507 2508 return ABIAlign; 2509 } 2510 2511 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2512 /// for __attribute__((aligned)) on this target, to be used if no alignment 2513 /// value is specified.
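/// For example, "int X __attribute__((aligned));" with no explicit value uses
/// this default, which is commonly 128 bits (16 bytes).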
2514 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2515 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2516 } 2517 2518 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2519 /// to a global variable of the specified type. 2520 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2521 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2522 return std::max(getPreferredTypeAlign(T), 2523 getTargetInfo().getMinGlobalAlign(TypeSize)); 2524 } 2525 2526 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2527 /// should be given to a global variable of the specified type. 2528 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2529 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2530 } 2531 2532 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2533 CharUnits Offset = CharUnits::Zero(); 2534 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2535 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2536 Offset += Layout->getBaseClassOffset(Base); 2537 Layout = &getASTRecordLayout(Base); 2538 } 2539 return Offset; 2540 } 2541 2542 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2543 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2544 CharUnits ThisAdjustment = CharUnits::Zero(); 2545 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2546 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2547 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2548 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2549 const CXXRecordDecl *Base = RD; 2550 const CXXRecordDecl *Derived = Path[I]; 2551 if (DerivedMember) 2552 std::swap(Base, Derived); 2553 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2554 RD = Path[I]; 2555 } 2556 if (DerivedMember) 2557 ThisAdjustment = -ThisAdjustment; 2558 return ThisAdjustment; 2559 } 2560 2561 /// DeepCollectObjCIvars - 2562 /// This routine first collects all declared, but not synthesized, ivars of 2563 /// the superclasses and then collects all ivars, including those synthesized, 2564 /// for the current class. It is used when implementing the current class, 2565 /// where all ivars, declared and synthesized, must be known. 2566 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2567 bool leafClass, 2568 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2569 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2570 DeepCollectObjCIvars(SuperClass, false, Ivars); 2571 if (!leafClass) { 2572 for (const auto *I : OI->ivars()) 2573 Ivars.push_back(I); 2574 } else { 2575 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2576 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2577 Iv = Iv->getNextIvar()) 2578 Ivars.push_back(Iv); 2579 } 2580 } 2581 2582 /// CollectInheritedProtocols - Collect all protocols in the current class and 2583 /// those inherited by it. 2584 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2585 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2586 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2587 // We can use protocol_iterator here instead of 2588 // all_referenced_protocol_iterator since we are walking all categories. 2589 for (auto *Proto : OI->all_referenced_protocols()) { 2590 CollectInheritedProtocols(Proto, Protocols); 2591 } 2592 2593 // Categories of this Interface.
2594 for (const auto *Cat : OI->visible_categories()) 2595 CollectInheritedProtocols(Cat, Protocols); 2596 2597 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2598 while (SD) { 2599 CollectInheritedProtocols(SD, Protocols); 2600 SD = SD->getSuperClass(); 2601 } 2602 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2603 for (auto *Proto : OC->protocols()) { 2604 CollectInheritedProtocols(Proto, Protocols); 2605 } 2606 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2607 // Insert the protocol. 2608 if (!Protocols.insert( 2609 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2610 return; 2611 2612 for (auto *Proto : OP->protocols()) 2613 CollectInheritedProtocols(Proto, Protocols); 2614 } 2615 } 2616 2617 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2618 const RecordDecl *RD) { 2619 assert(RD->isUnion() && "Must be union type"); 2620 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2621 2622 for (const auto *Field : RD->fields()) { 2623 if (!Context.hasUniqueObjectRepresentations(Field->getType())) 2624 return false; 2625 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2626 if (FieldSize != UnionSize) 2627 return false; 2628 } 2629 return !RD->field_empty(); 2630 } 2631 2632 static bool isStructEmpty(QualType Ty) { 2633 const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl(); 2634 2635 if (!RD->field_empty()) 2636 return false; 2637 2638 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) 2639 return ClassDecl->isEmpty(); 2640 2641 return true; 2642 } 2643 2644 static llvm::Optional<int64_t> 2645 structHasUniqueObjectRepresentations(const ASTContext &Context, 2646 const RecordDecl *RD) { 2647 assert(!RD->isUnion() && "Must be struct/class type"); 2648 const auto &Layout = Context.getASTRecordLayout(RD); 2649 2650 int64_t CurOffsetInBits = 0; 2651 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2652 if (ClassDecl->isDynamicClass()) 2653 return llvm::None; 2654 2655 SmallVector<std::pair<QualType, int64_t>, 4> Bases; 2656 for (const auto &Base : ClassDecl->bases()) { 2657 // Empty types can be inherited from, and non-empty types can potentially 2658 // have tail padding, so just make sure there isn't an error. 
2659 if (!isStructEmpty(Base.getType())) { 2660 llvm::Optional<int64_t> Size = structHasUniqueObjectRepresentations( 2661 Context, Base.getType()->castAs<RecordType>()->getDecl()); 2662 if (!Size) 2663 return llvm::None; 2664 Bases.emplace_back(Base.getType(), Size.getValue()); 2665 } 2666 } 2667 2668 llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L, 2669 const std::pair<QualType, int64_t> &R) { 2670 return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) < 2671 Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl()); 2672 }); 2673 2674 for (const auto &Base : Bases) { 2675 int64_t BaseOffset = Context.toBits( 2676 Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl())); 2677 int64_t BaseSize = Base.second; 2678 if (BaseOffset != CurOffsetInBits) 2679 return llvm::None; 2680 CurOffsetInBits = BaseOffset + BaseSize; 2681 } 2682 } 2683 2684 for (const auto *Field : RD->fields()) { 2685 if (!Field->getType()->isReferenceType() && 2686 !Context.hasUniqueObjectRepresentations(Field->getType())) 2687 return llvm::None; 2688 2689 int64_t FieldSizeInBits = 2690 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2691 if (Field->isBitField()) { 2692 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2693 2694 if (BitfieldSize > FieldSizeInBits) 2695 return llvm::None; 2696 FieldSizeInBits = BitfieldSize; 2697 } 2698 2699 int64_t FieldOffsetInBits = Context.getFieldOffset(Field); 2700 2701 if (FieldOffsetInBits != CurOffsetInBits) 2702 return llvm::None; 2703 2704 CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits; 2705 } 2706 2707 return CurOffsetInBits; 2708 } 2709 2710 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { 2711 // C++17 [meta.unary.prop]: 2712 // The predicate condition for a template specialization 2713 // has_unique_object_representations<T> shall be 2714 // satisfied if and only if: 2715 // (9.1) - T is trivially copyable, and 2716 // (9.2) - any two objects of type T with the same value have the same 2717 // object representation, where two objects 2718 // of array or non-union class type are considered to have the same value 2719 // if their respective sequences of 2720 // direct subobjects have the same values, and two objects of union type 2721 // are considered to have the same 2722 // value if they have the same active member and the corresponding members 2723 // have the same value. 2724 // The set of scalar types for which this condition holds is 2725 // implementation-defined. [ Note: If a type has padding 2726 // bits, the condition does not hold; otherwise, the condition holds true 2727 // for unsigned integral types. -- end note ] 2728 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2729 2730 // Arrays are unique only if their element type is unique. 2731 if (Ty->isArrayType()) 2732 return hasUniqueObjectRepresentations(getBaseElementType(Ty)); 2733 2734 // (9.1) - T is trivially copyable... 2735 if (!Ty.isTriviallyCopyableType(*this)) 2736 return false; 2737 2738 // All integrals and enums are unique. 2739 if (Ty->isIntegralOrEnumerationType()) 2740 return true; 2741 2742 // All other pointers are unique. 
2743 if (Ty->isPointerType()) 2744 return true; 2745 2746 if (Ty->isMemberPointerType()) { 2747 const auto *MPT = Ty->getAs<MemberPointerType>(); 2748 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2749 } 2750 2751 if (Ty->isRecordType()) { 2752 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2753 2754 if (Record->isInvalidDecl()) 2755 return false; 2756 2757 if (Record->isUnion()) 2758 return unionHasUniqueObjectRepresentations(*this, Record); 2759 2760 Optional<int64_t> StructSize = 2761 structHasUniqueObjectRepresentations(*this, Record); 2762 2763 return StructSize && 2764 StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty)); 2765 } 2766 2767 // FIXME: More cases to handle here (list by rsmith): 2768 // vectors (careful about, eg, vector of 3 foo) 2769 // _Complex int and friends 2770 // _Atomic T 2771 // Obj-C block pointers 2772 // Obj-C object pointers 2773 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2774 // clk_event_t, queue_t, reserve_id_t) 2775 // There're also Obj-C class types and the Obj-C selector type, but I think it 2776 // makes sense for those to return false here. 2777 2778 return false; 2779 } 2780 2781 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2782 unsigned count = 0; 2783 // Count ivars declared in class extension. 2784 for (const auto *Ext : OI->known_extensions()) 2785 count += Ext->ivar_size(); 2786 2787 // Count ivar defined in this class's implementation. This 2788 // includes synthesized ivars. 2789 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2790 count += ImplDecl->ivar_size(); 2791 2792 return count; 2793 } 2794 2795 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2796 if (!E) 2797 return false; 2798 2799 // nullptr_t is always treated as null. 2800 if (E->getType()->isNullPtrType()) return true; 2801 2802 if (E->getType()->isAnyPointerType() && 2803 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2804 Expr::NPC_ValueDependentIsNull)) 2805 return true; 2806 2807 // Unfortunately, __null has type 'int'. 2808 if (isa<GNUNullExpr>(E)) return true; 2809 2810 return false; 2811 } 2812 2813 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2814 /// exists. 2815 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2816 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2817 I = ObjCImpls.find(D); 2818 if (I != ObjCImpls.end()) 2819 return cast<ObjCImplementationDecl>(I->second); 2820 return nullptr; 2821 } 2822 2823 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2824 /// exists. 2825 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2826 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2827 I = ObjCImpls.find(D); 2828 if (I != ObjCImpls.end()) 2829 return cast<ObjCCategoryImplDecl>(I->second); 2830 return nullptr; 2831 } 2832 2833 /// Set the implementation of ObjCInterfaceDecl. 2834 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2835 ObjCImplementationDecl *ImplD) { 2836 assert(IFaceD && ImplD && "Passed null params"); 2837 ObjCImpls[IFaceD] = ImplD; 2838 } 2839 2840 /// Set the implementation of ObjCCategoryDecl. 
2841 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2842 ObjCCategoryImplDecl *ImplD) { 2843 assert(CatD && ImplD && "Passed null params"); 2844 ObjCImpls[CatD] = ImplD; 2845 } 2846 2847 const ObjCMethodDecl * 2848 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2849 return ObjCMethodRedecls.lookup(MD); 2850 } 2851 2852 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2853 const ObjCMethodDecl *Redecl) { 2854 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2855 ObjCMethodRedecls[MD] = Redecl; 2856 } 2857 2858 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2859 const NamedDecl *ND) const { 2860 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2861 return ID; 2862 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2863 return CD->getClassInterface(); 2864 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2865 return IMD->getClassInterface(); 2866 2867 return nullptr; 2868 } 2869 2870 /// Get the copy initialization expression of VarDecl, or nullptr if 2871 /// none exists. 2872 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2873 assert(VD && "Passed null params"); 2874 assert(VD->hasAttr<BlocksAttr>() && 2875 "getBlockVarCopyInits - not __block var"); 2876 auto I = BlockVarCopyInits.find(VD); 2877 if (I != BlockVarCopyInits.end()) 2878 return I->second; 2879 return {nullptr, false}; 2880 } 2881 2882 /// Set the copy initialization expression of a block var decl. 2883 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2884 bool CanThrow) { 2885 assert(VD && CopyExpr && "Passed null params"); 2886 assert(VD->hasAttr<BlocksAttr>() && 2887 "setBlockVarCopyInits - not __block var"); 2888 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2889 } 2890 2891 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2892 unsigned DataSize) const { 2893 if (!DataSize) 2894 DataSize = TypeLoc::getFullDataSizeForType(T); 2895 else 2896 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2897 "incorrect data size provided to CreateTypeSourceInfo!"); 2898 2899 auto *TInfo = 2900 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2901 new (TInfo) TypeSourceInfo(T); 2902 return TInfo; 2903 } 2904 2905 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2906 SourceLocation L) const { 2907 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2908 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2909 return DI; 2910 } 2911 2912 const ASTRecordLayout & 2913 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2914 return getObjCLayout(D, nullptr); 2915 } 2916 2917 const ASTRecordLayout & 2918 ASTContext::getASTObjCImplementationLayout( 2919 const ObjCImplementationDecl *D) const { 2920 return getObjCLayout(D->getClassInterface(), D); 2921 } 2922 2923 //===----------------------------------------------------------------------===// 2924 // Type creation/memoization methods 2925 //===----------------------------------------------------------------------===// 2926 2927 QualType 2928 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2929 unsigned fastQuals = quals.getFastQualifiers(); 2930 quals.removeFastQualifiers(); 2931 2932 // Check if we've already instantiated this type. 
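  // (This follows the uniquing idiom used throughout this file: hash the
  // distinguishing fields into a FoldingSetNodeID, probe the FoldingSet, and
  // only allocate a new node on a miss, so there is exactly one node per
  // distinct (base type, qualifiers) pair and pointer equality can stand in
  // for structural equality.)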
2933 llvm::FoldingSetNodeID ID; 2934 ExtQuals::Profile(ID, baseType, quals); 2935 void *insertPos = nullptr; 2936 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 2937 assert(eq->getQualifiers() == quals); 2938 return QualType(eq, fastQuals); 2939 } 2940 2941 // If the base type is not canonical, make the appropriate canonical type. 2942 QualType canon; 2943 if (!baseType->isCanonicalUnqualified()) { 2944 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 2945 canonSplit.Quals.addConsistentQualifiers(quals); 2946 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 2947 2948 // Re-find the insert position. 2949 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 2950 } 2951 2952 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 2953 ExtQualNodes.InsertNode(eq, insertPos); 2954 return QualType(eq, fastQuals); 2955 } 2956 2957 QualType ASTContext::getAddrSpaceQualType(QualType T, 2958 LangAS AddressSpace) const { 2959 QualType CanT = getCanonicalType(T); 2960 if (CanT.getAddressSpace() == AddressSpace) 2961 return T; 2962 2963 // If we are composing extended qualifiers together, merge together 2964 // into one ExtQuals node. 2965 QualifierCollector Quals; 2966 const Type *TypeNode = Quals.strip(T); 2967 2968 // If this type already has an address space specified, it cannot get 2969 // another one. 2970 assert(!Quals.hasAddressSpace() && 2971 "Type cannot be in multiple addr spaces!"); 2972 Quals.addAddressSpace(AddressSpace); 2973 2974 return getExtQualType(TypeNode, Quals); 2975 } 2976 2977 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 2978 // If the type is not qualified with an address space, just return it 2979 // immediately. 2980 if (!T.hasAddressSpace()) 2981 return T; 2982 2983 // If we are composing extended qualifiers together, merge together 2984 // into one ExtQuals node. 2985 QualifierCollector Quals; 2986 const Type *TypeNode; 2987 2988 while (T.hasAddressSpace()) { 2989 TypeNode = Quals.strip(T); 2990 2991 // If the type no longer has an address space after stripping qualifiers, 2992 // jump out. 2993 if (!QualType(TypeNode, 0).hasAddressSpace()) 2994 break; 2995 2996 // There might be sugar in the way. Strip it and try again. 2997 T = T.getSingleStepDesugaredType(*this); 2998 } 2999 3000 Quals.removeAddressSpace(); 3001 3002 // Removal of the address space can mean there are no longer any 3003 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3004 // or required. 3005 if (Quals.hasNonFastQualifiers()) 3006 return getExtQualType(TypeNode, Quals); 3007 else 3008 return QualType(TypeNode, Quals.getFastQualifiers()); 3009 } 3010 3011 QualType ASTContext::getObjCGCQualType(QualType T, 3012 Qualifiers::GC GCAttr) const { 3013 QualType CanT = getCanonicalType(T); 3014 if (CanT.getObjCGCAttr() == GCAttr) 3015 return T; 3016 3017 if (const auto *ptr = T->getAs<PointerType>()) { 3018 QualType Pointee = ptr->getPointeeType(); 3019 if (Pointee->isAnyPointerType()) { 3020 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3021 return getPointerType(ResultType); 3022 } 3023 } 3024 3025 // If we are composing extended qualifiers together, merge together 3026 // into one ExtQuals node. 3027 QualifierCollector Quals; 3028 const Type *TypeNode = Quals.strip(T); 3029 3030 // If this type already has an ObjCGC specified, it cannot get 3031 // another one. 
3032 assert(!Quals.hasObjCGCAttr() && 3033 "Type cannot have multiple ObjCGCs!"); 3034 Quals.addObjCGCAttr(GCAttr); 3035 3036 return getExtQualType(TypeNode, Quals); 3037 } 3038 3039 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3040 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3041 QualType Pointee = Ptr->getPointeeType(); 3042 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3043 return getPointerType(removeAddrSpaceQualType(Pointee)); 3044 } 3045 } 3046 return T; 3047 } 3048 3049 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3050 FunctionType::ExtInfo Info) { 3051 if (T->getExtInfo() == Info) 3052 return T; 3053 3054 QualType Result; 3055 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3056 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3057 } else { 3058 const auto *FPT = cast<FunctionProtoType>(T); 3059 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3060 EPI.ExtInfo = Info; 3061 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3062 } 3063 3064 return cast<FunctionType>(Result.getTypePtr()); 3065 } 3066 3067 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3068 QualType ResultType) { 3069 FD = FD->getMostRecentDecl(); 3070 while (true) { 3071 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3072 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3073 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3074 if (FunctionDecl *Next = FD->getPreviousDecl()) 3075 FD = Next; 3076 else 3077 break; 3078 } 3079 if (ASTMutationListener *L = getASTMutationListener()) 3080 L->DeducedReturnType(FD, ResultType); 3081 } 3082 3083 /// Get a function type and produce the equivalent function type with the 3084 /// specified exception specification. Type sugar that can be present on a 3085 /// declaration of a function with an exception specification is permitted 3086 /// and preserved. Other type sugar (for instance, typedefs) is not. 3087 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3088 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) { 3089 // Might have some parens. 3090 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3091 return getParenType( 3092 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3093 3094 // Might be wrapped in a macro qualified type. 3095 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3096 return getMacroQualifiedType( 3097 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3098 MQT->getMacroIdentifier()); 3099 3100 // Might have a calling-convention attribute. 3101 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3102 return getAttributedType( 3103 AT->getAttrKind(), 3104 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3105 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3106 3107 // Anything else must be a function type. Rebuild it with the new exception 3108 // specification. 
3109 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3110 return getFunctionType( 3111 Proto->getReturnType(), Proto->getParamTypes(), 3112 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3113 } 3114 3115 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3116 QualType U) { 3117 return hasSameType(T, U) || 3118 (getLangOpts().CPlusPlus17 && 3119 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3120 getFunctionTypeWithExceptionSpec(U, EST_None))); 3121 } 3122 3123 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3124 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3125 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3126 SmallVector<QualType, 16> Args(Proto->param_types()); 3127 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3128 Args[i] = removePtrSizeAddrSpace(Args[i]); 3129 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3130 } 3131 3132 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3133 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3134 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3135 } 3136 3137 return T; 3138 } 3139 3140 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3141 return hasSameType(T, U) || 3142 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3143 getFunctionTypeWithoutPtrSizes(U)); 3144 } 3145 3146 void ASTContext::adjustExceptionSpec( 3147 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3148 bool AsWritten) { 3149 // Update the type. 3150 QualType Updated = 3151 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3152 FD->setType(Updated); 3153 3154 if (!AsWritten) 3155 return; 3156 3157 // Update the type in the type source information too. 3158 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3159 // If the type and the type-as-written differ, we may need to update 3160 // the type-as-written too. 3161 if (TSInfo->getType() != FD->getType()) 3162 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3163 3164 // FIXME: When we get proper type location information for exceptions, 3165 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3166 // up the TypeSourceInfo; 3167 assert(TypeLoc::getFullDataSizeForType(Updated) == 3168 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3169 "TypeLoc size mismatch from updating exception specification"); 3170 TSInfo->overrideType(Updated); 3171 } 3172 } 3173 3174 /// getComplexType - Return the uniqued reference to the type for a complex 3175 /// number with the specified element type. 3176 QualType ASTContext::getComplexType(QualType T) const { 3177 // Unique pointers, to guarantee there is only one pointer of a particular 3178 // structure. 3179 llvm::FoldingSetNodeID ID; 3180 ComplexType::Profile(ID, T); 3181 3182 void *InsertPos = nullptr; 3183 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3184 return QualType(CT, 0); 3185 3186 // If the pointee type isn't canonical, this won't be a canonical type either, 3187 // so fill in the canonical type field. 3188 QualType Canonical; 3189 if (!T.isCanonical()) { 3190 Canonical = getComplexType(getCanonicalType(T)); 3191 3192 // Get the new insert position for the node we care about. 
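    // (Recursively building the canonical complex type above may have grown
    // the folding set and invalidated the insert position computed earlier,
    // so it has to be recomputed before inserting.)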
3193 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3194 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3195 } 3196 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3197 Types.push_back(New); 3198 ComplexTypes.InsertNode(New, InsertPos); 3199 return QualType(New, 0); 3200 } 3201 3202 /// getPointerType - Return the uniqued reference to the type for a pointer to 3203 /// the specified type. 3204 QualType ASTContext::getPointerType(QualType T) const { 3205 // Unique pointers, to guarantee there is only one pointer of a particular 3206 // structure. 3207 llvm::FoldingSetNodeID ID; 3208 PointerType::Profile(ID, T); 3209 3210 void *InsertPos = nullptr; 3211 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3212 return QualType(PT, 0); 3213 3214 // If the pointee type isn't canonical, this won't be a canonical type either, 3215 // so fill in the canonical type field. 3216 QualType Canonical; 3217 if (!T.isCanonical()) { 3218 Canonical = getPointerType(getCanonicalType(T)); 3219 3220 // Get the new insert position for the node we care about. 3221 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3222 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3223 } 3224 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3225 Types.push_back(New); 3226 PointerTypes.InsertNode(New, InsertPos); 3227 return QualType(New, 0); 3228 } 3229 3230 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3231 llvm::FoldingSetNodeID ID; 3232 AdjustedType::Profile(ID, Orig, New); 3233 void *InsertPos = nullptr; 3234 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3235 if (AT) 3236 return QualType(AT, 0); 3237 3238 QualType Canonical = getCanonicalType(New); 3239 3240 // Get the new insert position for the node we care about. 3241 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3242 assert(!AT && "Shouldn't be in the map!"); 3243 3244 AT = new (*this, TypeAlignment) 3245 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3246 Types.push_back(AT); 3247 AdjustedTypes.InsertNode(AT, InsertPos); 3248 return QualType(AT, 0); 3249 } 3250 3251 QualType ASTContext::getDecayedType(QualType T) const { 3252 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3253 3254 QualType Decayed; 3255 3256 // C99 6.7.5.3p7: 3257 // A declaration of a parameter as "array of type" shall be 3258 // adjusted to "qualified pointer to type", where the type 3259 // qualifiers (if any) are those specified within the [ and ] of 3260 // the array type derivation. 3261 if (T->isArrayType()) 3262 Decayed = getArrayDecayedType(T); 3263 3264 // C99 6.7.5.3p8: 3265 // A declaration of a parameter as "function returning type" 3266 // shall be adjusted to "pointer to function returning type", as 3267 // in 6.3.2.1. 3268 if (T->isFunctionType()) 3269 Decayed = getPointerType(T); 3270 3271 llvm::FoldingSetNodeID ID; 3272 AdjustedType::Profile(ID, T, Decayed); 3273 void *InsertPos = nullptr; 3274 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3275 if (AT) 3276 return QualType(AT, 0); 3277 3278 QualType Canonical = getCanonicalType(Decayed); 3279 3280 // Get the new insert position for the node we care about. 
3281 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3282 assert(!AT && "Shouldn't be in the map!"); 3283 3284 AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); 3285 Types.push_back(AT); 3286 AdjustedTypes.InsertNode(AT, InsertPos); 3287 return QualType(AT, 0); 3288 } 3289 3290 /// getBlockPointerType - Return the uniqued reference to the type for 3291 /// a pointer to the specified block. 3292 QualType ASTContext::getBlockPointerType(QualType T) const { 3293 assert(T->isFunctionType() && "block of function types only"); 3294 // Unique pointers, to guarantee there is only one block of a particular 3295 // structure. 3296 llvm::FoldingSetNodeID ID; 3297 BlockPointerType::Profile(ID, T); 3298 3299 void *InsertPos = nullptr; 3300 if (BlockPointerType *PT = 3301 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3302 return QualType(PT, 0); 3303 3304 // If the block pointee type isn't canonical, this won't be a canonical 3305 // type either so fill in the canonical type field. 3306 QualType Canonical; 3307 if (!T.isCanonical()) { 3308 Canonical = getBlockPointerType(getCanonicalType(T)); 3309 3310 // Get the new insert position for the node we care about. 3311 BlockPointerType *NewIP = 3312 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3313 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3314 } 3315 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3316 Types.push_back(New); 3317 BlockPointerTypes.InsertNode(New, InsertPos); 3318 return QualType(New, 0); 3319 } 3320 3321 /// getLValueReferenceType - Return the uniqued reference to the type for an 3322 /// lvalue reference to the specified type. 3323 QualType 3324 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3325 assert(getCanonicalType(T) != OverloadTy && 3326 "Unresolved overloaded function type"); 3327 3328 // Unique pointers, to guarantee there is only one pointer of a particular 3329 // structure. 3330 llvm::FoldingSetNodeID ID; 3331 ReferenceType::Profile(ID, T, SpelledAsLValue); 3332 3333 void *InsertPos = nullptr; 3334 if (LValueReferenceType *RT = 3335 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3336 return QualType(RT, 0); 3337 3338 const auto *InnerRef = T->getAs<ReferenceType>(); 3339 3340 // If the referencee type isn't canonical, this won't be a canonical type 3341 // either, so fill in the canonical type field. 3342 QualType Canonical; 3343 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3344 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3345 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3346 3347 // Get the new insert position for the node we care about. 3348 LValueReferenceType *NewIP = 3349 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3350 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3351 } 3352 3353 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 3354 SpelledAsLValue); 3355 Types.push_back(New); 3356 LValueReferenceTypes.InsertNode(New, InsertPos); 3357 3358 return QualType(New, 0); 3359 } 3360 3361 /// getRValueReferenceType - Return the uniqued reference to the type for an 3362 /// rvalue reference to the specified type. 3363 QualType ASTContext::getRValueReferenceType(QualType T) const { 3364 // Unique pointers, to guarantee there is only one pointer of a particular 3365 // structure. 
3366 llvm::FoldingSetNodeID ID; 3367 ReferenceType::Profile(ID, T, false); 3368 3369 void *InsertPos = nullptr; 3370 if (RValueReferenceType *RT = 3371 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3372 return QualType(RT, 0); 3373 3374 const auto *InnerRef = T->getAs<ReferenceType>(); 3375 3376 // If the referencee type isn't canonical, this won't be a canonical type 3377 // either, so fill in the canonical type field. 3378 QualType Canonical; 3379 if (InnerRef || !T.isCanonical()) { 3380 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3381 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3382 3383 // Get the new insert position for the node we care about. 3384 RValueReferenceType *NewIP = 3385 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3386 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3387 } 3388 3389 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 3390 Types.push_back(New); 3391 RValueReferenceTypes.InsertNode(New, InsertPos); 3392 return QualType(New, 0); 3393 } 3394 3395 /// getMemberPointerType - Return the uniqued reference to the type for a 3396 /// member pointer to the specified type, in the specified class. 3397 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3398 // Unique pointers, to guarantee there is only one pointer of a particular 3399 // structure. 3400 llvm::FoldingSetNodeID ID; 3401 MemberPointerType::Profile(ID, T, Cls); 3402 3403 void *InsertPos = nullptr; 3404 if (MemberPointerType *PT = 3405 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3406 return QualType(PT, 0); 3407 3408 // If the pointee or class type isn't canonical, this won't be a canonical 3409 // type either, so fill in the canonical type field. 3410 QualType Canonical; 3411 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3412 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3413 3414 // Get the new insert position for the node we care about. 3415 MemberPointerType *NewIP = 3416 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3417 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3418 } 3419 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 3420 Types.push_back(New); 3421 MemberPointerTypes.InsertNode(New, InsertPos); 3422 return QualType(New, 0); 3423 } 3424 3425 /// getConstantArrayType - Return the unique reference to the type for an 3426 /// array of the specified element type. 3427 QualType ASTContext::getConstantArrayType(QualType EltTy, 3428 const llvm::APInt &ArySizeIn, 3429 const Expr *SizeExpr, 3430 ArrayType::ArraySizeModifier ASM, 3431 unsigned IndexTypeQuals) const { 3432 assert((EltTy->isDependentType() || 3433 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3434 "Constant array of VLAs is illegal!"); 3435 3436 // We only need the size as part of the type if it's instantiation-dependent. 3437 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3438 SizeExpr = nullptr; 3439 3440 // Convert the array size into a canonical width matching the pointer size for 3441 // the target. 
3442 llvm::APInt ArySize(ArySizeIn); 3443 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3444 3445 llvm::FoldingSetNodeID ID; 3446 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3447 IndexTypeQuals); 3448 3449 void *InsertPos = nullptr; 3450 if (ConstantArrayType *ATP = 3451 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3452 return QualType(ATP, 0); 3453 3454 // If the element type isn't canonical or has qualifiers, or the array bound 3455 // is instantiation-dependent, this won't be a canonical type either, so fill 3456 // in the canonical type field. 3457 QualType Canon; 3458 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3459 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3460 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3461 ASM, IndexTypeQuals); 3462 Canon = getQualifiedType(Canon, canonSplit.Quals); 3463 3464 // Get the new insert position for the node we care about. 3465 ConstantArrayType *NewIP = 3466 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3467 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3468 } 3469 3470 void *Mem = Allocate( 3471 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3472 TypeAlignment); 3473 auto *New = new (Mem) 3474 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3475 ConstantArrayTypes.InsertNode(New, InsertPos); 3476 Types.push_back(New); 3477 return QualType(New, 0); 3478 } 3479 3480 /// getVariableArrayDecayedType - Turns the given type, which may be 3481 /// variably-modified, into the corresponding type with all the known 3482 /// sizes replaced with [*]. 3483 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3484 // Vastly most common case. 3485 if (!type->isVariablyModifiedType()) return type; 3486 3487 QualType result; 3488 3489 SplitQualType split = type.getSplitDesugaredType(); 3490 const Type *ty = split.Ty; 3491 switch (ty->getTypeClass()) { 3492 #define TYPE(Class, Base) 3493 #define ABSTRACT_TYPE(Class, Base) 3494 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3495 #include "clang/AST/TypeNodes.inc" 3496 llvm_unreachable("didn't desugar past all non-canonical types?"); 3497 3498 // These types should never be variably-modified. 3499 case Type::Builtin: 3500 case Type::Complex: 3501 case Type::Vector: 3502 case Type::DependentVector: 3503 case Type::ExtVector: 3504 case Type::DependentSizedExtVector: 3505 case Type::ConstantMatrix: 3506 case Type::DependentSizedMatrix: 3507 case Type::DependentAddressSpace: 3508 case Type::ObjCObject: 3509 case Type::ObjCInterface: 3510 case Type::ObjCObjectPointer: 3511 case Type::Record: 3512 case Type::Enum: 3513 case Type::UnresolvedUsing: 3514 case Type::TypeOfExpr: 3515 case Type::TypeOf: 3516 case Type::Decltype: 3517 case Type::UnaryTransform: 3518 case Type::DependentName: 3519 case Type::InjectedClassName: 3520 case Type::TemplateSpecialization: 3521 case Type::DependentTemplateSpecialization: 3522 case Type::TemplateTypeParm: 3523 case Type::SubstTemplateTypeParmPack: 3524 case Type::Auto: 3525 case Type::DeducedTemplateSpecialization: 3526 case Type::PackExpansion: 3527 case Type::ExtInt: 3528 case Type::DependentExtInt: 3529 llvm_unreachable("type should never be variably-modified"); 3530 3531 // These types can be variably-modified but should never need to 3532 // further decay. 
3533 case Type::FunctionNoProto: 3534 case Type::FunctionProto: 3535 case Type::BlockPointer: 3536 case Type::MemberPointer: 3537 case Type::Pipe: 3538 return type; 3539 3540 // These types can be variably-modified. All these modifications 3541 // preserve structure except as noted by comments. 3542 // TODO: if we ever care about optimizing VLAs, there are no-op 3543 // optimizations available here. 3544 case Type::Pointer: 3545 result = getPointerType(getVariableArrayDecayedType( 3546 cast<PointerType>(ty)->getPointeeType())); 3547 break; 3548 3549 case Type::LValueReference: { 3550 const auto *lv = cast<LValueReferenceType>(ty); 3551 result = getLValueReferenceType( 3552 getVariableArrayDecayedType(lv->getPointeeType()), 3553 lv->isSpelledAsLValue()); 3554 break; 3555 } 3556 3557 case Type::RValueReference: { 3558 const auto *lv = cast<RValueReferenceType>(ty); 3559 result = getRValueReferenceType( 3560 getVariableArrayDecayedType(lv->getPointeeType())); 3561 break; 3562 } 3563 3564 case Type::Atomic: { 3565 const auto *at = cast<AtomicType>(ty); 3566 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3567 break; 3568 } 3569 3570 case Type::ConstantArray: { 3571 const auto *cat = cast<ConstantArrayType>(ty); 3572 result = getConstantArrayType( 3573 getVariableArrayDecayedType(cat->getElementType()), 3574 cat->getSize(), 3575 cat->getSizeExpr(), 3576 cat->getSizeModifier(), 3577 cat->getIndexTypeCVRQualifiers()); 3578 break; 3579 } 3580 3581 case Type::DependentSizedArray: { 3582 const auto *dat = cast<DependentSizedArrayType>(ty); 3583 result = getDependentSizedArrayType( 3584 getVariableArrayDecayedType(dat->getElementType()), 3585 dat->getSizeExpr(), 3586 dat->getSizeModifier(), 3587 dat->getIndexTypeCVRQualifiers(), 3588 dat->getBracketsRange()); 3589 break; 3590 } 3591 3592 // Turn incomplete types into [*] types. 3593 case Type::IncompleteArray: { 3594 const auto *iat = cast<IncompleteArrayType>(ty); 3595 result = getVariableArrayType( 3596 getVariableArrayDecayedType(iat->getElementType()), 3597 /*size*/ nullptr, 3598 ArrayType::Normal, 3599 iat->getIndexTypeCVRQualifiers(), 3600 SourceRange()); 3601 break; 3602 } 3603 3604 // Turn VLA types into [*] types. 3605 case Type::VariableArray: { 3606 const auto *vat = cast<VariableArrayType>(ty); 3607 result = getVariableArrayType( 3608 getVariableArrayDecayedType(vat->getElementType()), 3609 /*size*/ nullptr, 3610 ArrayType::Star, 3611 vat->getIndexTypeCVRQualifiers(), 3612 vat->getBracketsRange()); 3613 break; 3614 } 3615 } 3616 3617 // Apply the top-level qualifiers from the original. 3618 return getQualifiedType(result, split.Quals); 3619 } 3620 3621 /// getVariableArrayType - Returns a non-unique reference to the type for a 3622 /// variable array of the specified element type. 3623 QualType ASTContext::getVariableArrayType(QualType EltTy, 3624 Expr *NumElts, 3625 ArrayType::ArraySizeModifier ASM, 3626 unsigned IndexTypeQuals, 3627 SourceRange Brackets) const { 3628 // Since we don't unique expressions, it isn't possible to unique VLA's 3629 // that have an expression provided for their size. 3630 QualType Canon; 3631 3632 // Be sure to pull qualifiers off the element type. 
3633 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3634 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3635 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3636 IndexTypeQuals, Brackets); 3637 Canon = getQualifiedType(Canon, canonSplit.Quals); 3638 } 3639 3640 auto *New = new (*this, TypeAlignment) 3641 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3642 3643 VariableArrayTypes.push_back(New); 3644 Types.push_back(New); 3645 return QualType(New, 0); 3646 } 3647 3648 /// getDependentSizedArrayType - Returns a non-unique reference to 3649 /// the type for a dependently-sized array of the specified element 3650 /// type. 3651 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3652 Expr *numElements, 3653 ArrayType::ArraySizeModifier ASM, 3654 unsigned elementTypeQuals, 3655 SourceRange brackets) const { 3656 assert((!numElements || numElements->isTypeDependent() || 3657 numElements->isValueDependent()) && 3658 "Size must be type- or value-dependent!"); 3659 3660 // Dependently-sized array types that do not have a specified number 3661 // of elements will have their sizes deduced from a dependent 3662 // initializer. We do no canonicalization here at all, which is okay 3663 // because they can't be used in most locations. 3664 if (!numElements) { 3665 auto *newType 3666 = new (*this, TypeAlignment) 3667 DependentSizedArrayType(*this, elementType, QualType(), 3668 numElements, ASM, elementTypeQuals, 3669 brackets); 3670 Types.push_back(newType); 3671 return QualType(newType, 0); 3672 } 3673 3674 // Otherwise, we actually build a new type every time, but we 3675 // also build a canonical type. 3676 3677 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3678 3679 void *insertPos = nullptr; 3680 llvm::FoldingSetNodeID ID; 3681 DependentSizedArrayType::Profile(ID, *this, 3682 QualType(canonElementType.Ty, 0), 3683 ASM, elementTypeQuals, numElements); 3684 3685 // Look for an existing type with these properties. 3686 DependentSizedArrayType *canonTy = 3687 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3688 3689 // If we don't have one, build one. 3690 if (!canonTy) { 3691 canonTy = new (*this, TypeAlignment) 3692 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 3693 QualType(), numElements, ASM, elementTypeQuals, 3694 brackets); 3695 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3696 Types.push_back(canonTy); 3697 } 3698 3699 // Apply qualifiers from the element type to the array. 3700 QualType canon = getQualifiedType(QualType(canonTy,0), 3701 canonElementType.Quals); 3702 3703 // If we didn't need extra canonicalization for the element type or the size 3704 // expression, then just use that as our result. 3705 if (QualType(canonElementType.Ty, 0) == elementType && 3706 canonTy->getSizeExpr() == numElements) 3707 return canon; 3708 3709 // Otherwise, we need to build a type which follows the spelling 3710 // of the element type. 
3711 auto *sugaredType 3712 = new (*this, TypeAlignment) 3713 DependentSizedArrayType(*this, elementType, canon, numElements, 3714 ASM, elementTypeQuals, brackets); 3715 Types.push_back(sugaredType); 3716 return QualType(sugaredType, 0); 3717 } 3718 3719 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3720 ArrayType::ArraySizeModifier ASM, 3721 unsigned elementTypeQuals) const { 3722 llvm::FoldingSetNodeID ID; 3723 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3724 3725 void *insertPos = nullptr; 3726 if (IncompleteArrayType *iat = 3727 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3728 return QualType(iat, 0); 3729 3730 // If the element type isn't canonical, this won't be a canonical type 3731 // either, so fill in the canonical type field. We also have to pull 3732 // qualifiers off the element type. 3733 QualType canon; 3734 3735 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3736 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3737 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3738 ASM, elementTypeQuals); 3739 canon = getQualifiedType(canon, canonSplit.Quals); 3740 3741 // Get the new insert position for the node we care about. 3742 IncompleteArrayType *existing = 3743 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3744 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3745 } 3746 3747 auto *newType = new (*this, TypeAlignment) 3748 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3749 3750 IncompleteArrayTypes.InsertNode(newType, insertPos); 3751 Types.push_back(newType); 3752 return QualType(newType, 0); 3753 } 3754 3755 ASTContext::BuiltinVectorTypeInfo 3756 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3757 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3758 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3759 NUMVECTORS}; 3760 3761 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3762 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3763 3764 switch (Ty->getKind()) { 3765 default: 3766 llvm_unreachable("Unsupported builtin vector type"); 3767 case BuiltinType::SveInt8: 3768 return SVE_INT_ELTTY(8, 16, true, 1); 3769 case BuiltinType::SveUint8: 3770 return SVE_INT_ELTTY(8, 16, false, 1); 3771 case BuiltinType::SveInt8x2: 3772 return SVE_INT_ELTTY(8, 16, true, 2); 3773 case BuiltinType::SveUint8x2: 3774 return SVE_INT_ELTTY(8, 16, false, 2); 3775 case BuiltinType::SveInt8x3: 3776 return SVE_INT_ELTTY(8, 16, true, 3); 3777 case BuiltinType::SveUint8x3: 3778 return SVE_INT_ELTTY(8, 16, false, 3); 3779 case BuiltinType::SveInt8x4: 3780 return SVE_INT_ELTTY(8, 16, true, 4); 3781 case BuiltinType::SveUint8x4: 3782 return SVE_INT_ELTTY(8, 16, false, 4); 3783 case BuiltinType::SveInt16: 3784 return SVE_INT_ELTTY(16, 8, true, 1); 3785 case BuiltinType::SveUint16: 3786 return SVE_INT_ELTTY(16, 8, false, 1); 3787 case BuiltinType::SveInt16x2: 3788 return SVE_INT_ELTTY(16, 8, true, 2); 3789 case BuiltinType::SveUint16x2: 3790 return SVE_INT_ELTTY(16, 8, false, 2); 3791 case BuiltinType::SveInt16x3: 3792 return SVE_INT_ELTTY(16, 8, true, 3); 3793 case BuiltinType::SveUint16x3: 3794 return SVE_INT_ELTTY(16, 8, false, 3); 3795 case BuiltinType::SveInt16x4: 3796 return SVE_INT_ELTTY(16, 8, true, 4); 3797 case BuiltinType::SveUint16x4: 3798 return SVE_INT_ELTTY(16, 8, false, 4); 3799 case BuiltinType::SveInt32: 3800 return SVE_INT_ELTTY(32, 4, true, 1); 3801 case 
BuiltinType::SveUint32: 3802 return SVE_INT_ELTTY(32, 4, false, 1); 3803 case BuiltinType::SveInt32x2: 3804 return SVE_INT_ELTTY(32, 4, true, 2); 3805 case BuiltinType::SveUint32x2: 3806 return SVE_INT_ELTTY(32, 4, false, 2); 3807 case BuiltinType::SveInt32x3: 3808 return SVE_INT_ELTTY(32, 4, true, 3); 3809 case BuiltinType::SveUint32x3: 3810 return SVE_INT_ELTTY(32, 4, false, 3); 3811 case BuiltinType::SveInt32x4: 3812 return SVE_INT_ELTTY(32, 4, true, 4); 3813 case BuiltinType::SveUint32x4: 3814 return SVE_INT_ELTTY(32, 4, false, 4); 3815 case BuiltinType::SveInt64: 3816 return SVE_INT_ELTTY(64, 2, true, 1); 3817 case BuiltinType::SveUint64: 3818 return SVE_INT_ELTTY(64, 2, false, 1); 3819 case BuiltinType::SveInt64x2: 3820 return SVE_INT_ELTTY(64, 2, true, 2); 3821 case BuiltinType::SveUint64x2: 3822 return SVE_INT_ELTTY(64, 2, false, 2); 3823 case BuiltinType::SveInt64x3: 3824 return SVE_INT_ELTTY(64, 2, true, 3); 3825 case BuiltinType::SveUint64x3: 3826 return SVE_INT_ELTTY(64, 2, false, 3); 3827 case BuiltinType::SveInt64x4: 3828 return SVE_INT_ELTTY(64, 2, true, 4); 3829 case BuiltinType::SveUint64x4: 3830 return SVE_INT_ELTTY(64, 2, false, 4); 3831 case BuiltinType::SveBool: 3832 return SVE_ELTTY(BoolTy, 16, 1); 3833 case BuiltinType::SveFloat16: 3834 return SVE_ELTTY(HalfTy, 8, 1); 3835 case BuiltinType::SveFloat16x2: 3836 return SVE_ELTTY(HalfTy, 8, 2); 3837 case BuiltinType::SveFloat16x3: 3838 return SVE_ELTTY(HalfTy, 8, 3); 3839 case BuiltinType::SveFloat16x4: 3840 return SVE_ELTTY(HalfTy, 8, 4); 3841 case BuiltinType::SveFloat32: 3842 return SVE_ELTTY(FloatTy, 4, 1); 3843 case BuiltinType::SveFloat32x2: 3844 return SVE_ELTTY(FloatTy, 4, 2); 3845 case BuiltinType::SveFloat32x3: 3846 return SVE_ELTTY(FloatTy, 4, 3); 3847 case BuiltinType::SveFloat32x4: 3848 return SVE_ELTTY(FloatTy, 4, 4); 3849 case BuiltinType::SveFloat64: 3850 return SVE_ELTTY(DoubleTy, 2, 1); 3851 case BuiltinType::SveFloat64x2: 3852 return SVE_ELTTY(DoubleTy, 2, 2); 3853 case BuiltinType::SveFloat64x3: 3854 return SVE_ELTTY(DoubleTy, 2, 3); 3855 case BuiltinType::SveFloat64x4: 3856 return SVE_ELTTY(DoubleTy, 2, 4); 3857 case BuiltinType::SveBFloat16: 3858 return SVE_ELTTY(BFloat16Ty, 8, 1); 3859 case BuiltinType::SveBFloat16x2: 3860 return SVE_ELTTY(BFloat16Ty, 8, 2); 3861 case BuiltinType::SveBFloat16x3: 3862 return SVE_ELTTY(BFloat16Ty, 8, 3); 3863 case BuiltinType::SveBFloat16x4: 3864 return SVE_ELTTY(BFloat16Ty, 8, 4); 3865 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3866 IsSigned) \ 3867 case BuiltinType::Id: \ 3868 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3869 llvm::ElementCount::getScalable(NumEls), NF}; 3870 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3871 case BuiltinType::Id: \ 3872 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3873 llvm::ElementCount::getScalable(NumEls), NF}; 3874 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3875 case BuiltinType::Id: \ 3876 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3877 #include "clang/Basic/RISCVVTypes.def" 3878 } 3879 } 3880 3881 /// getScalableVectorType - Return the unique reference to a scalable vector 3882 /// type of the specified element type and size. VectorType must be a built-in 3883 /// type. 
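/// For illustration only (not part of the original source): a caller holding
/// an ASTContext `Ctx` on a target with AArch64 SVE types enabled, where
/// `int` is 32 bits, could request the scalable type for four signed 32-bit
/// lanes like so:
/// \code
///   QualType SveInt32Ty = Ctx.getScalableVectorType(Ctx.IntTy, 4);
/// \endcode
/// If no builtin scalable type matches the element type and lane count, a
/// null QualType is returned.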
3884 QualType ASTContext::getScalableVectorType(QualType EltTy, 3885 unsigned NumElts) const { 3886 if (Target->hasAArch64SVETypes()) { 3887 uint64_t EltTySize = getTypeSize(EltTy); 3888 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3889 IsSigned, IsFP, IsBF) \ 3890 if (!EltTy->isBooleanType() && \ 3891 ((EltTy->hasIntegerRepresentation() && \ 3892 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3893 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3894 IsFP && !IsBF) || \ 3895 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3896 IsBF && !IsFP)) && \ 3897 EltTySize == ElBits && NumElts == NumEls) { \ 3898 return SingletonId; \ 3899 } 3900 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3901 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3902 return SingletonId; 3903 #include "clang/Basic/AArch64SVEACLETypes.def" 3904 } else if (Target->hasRISCVVTypes()) { 3905 uint64_t EltTySize = getTypeSize(EltTy); 3906 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3907 IsFP) \ 3908 if (!EltTy->isBooleanType() && \ 3909 ((EltTy->hasIntegerRepresentation() && \ 3910 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3911 (EltTy->hasFloatingRepresentation() && IsFP)) && \ 3912 EltTySize == ElBits && NumElts == NumEls) \ 3913 return SingletonId; 3914 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3915 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3916 return SingletonId; 3917 #include "clang/Basic/RISCVVTypes.def" 3918 } 3919 return QualType(); 3920 } 3921 3922 /// getVectorType - Return the unique reference to a vector type of 3923 /// the specified element type and size. VectorType must be a built-in type. 3924 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 3925 VectorType::VectorKind VecKind) const { 3926 assert(vecType->isBuiltinType()); 3927 3928 // Check if we've already instantiated a vector of this type. 3929 llvm::FoldingSetNodeID ID; 3930 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 3931 3932 void *InsertPos = nullptr; 3933 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 3934 return QualType(VTP, 0); 3935 3936 // If the element type isn't canonical, this won't be a canonical type either, 3937 // so fill in the canonical type field. 3938 QualType Canonical; 3939 if (!vecType.isCanonical()) { 3940 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 3941 3942 // Get the new insert position for the node we care about. 
3943 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 3944 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3945 } 3946 auto *New = new (*this, TypeAlignment) 3947 VectorType(vecType, NumElts, Canonical, VecKind); 3948 VectorTypes.InsertNode(New, InsertPos); 3949 Types.push_back(New); 3950 return QualType(New, 0); 3951 } 3952 3953 QualType 3954 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 3955 SourceLocation AttrLoc, 3956 VectorType::VectorKind VecKind) const { 3957 llvm::FoldingSetNodeID ID; 3958 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 3959 VecKind); 3960 void *InsertPos = nullptr; 3961 DependentVectorType *Canon = 3962 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 3963 DependentVectorType *New; 3964 3965 if (Canon) { 3966 New = new (*this, TypeAlignment) DependentVectorType( 3967 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 3968 } else { 3969 QualType CanonVecTy = getCanonicalType(VecType); 3970 if (CanonVecTy == VecType) { 3971 New = new (*this, TypeAlignment) DependentVectorType( 3972 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); 3973 3974 DependentVectorType *CanonCheck = 3975 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 3976 assert(!CanonCheck && 3977 "Dependent-sized vector_size canonical type broken"); 3978 (void)CanonCheck; 3979 DependentVectorTypes.InsertNode(New, InsertPos); 3980 } else { 3981 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 3982 SourceLocation(), VecKind); 3983 New = new (*this, TypeAlignment) DependentVectorType( 3984 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 3985 } 3986 } 3987 3988 Types.push_back(New); 3989 return QualType(New, 0); 3990 } 3991 3992 /// getExtVectorType - Return the unique reference to an extended vector type of 3993 /// the specified element type and size. VectorType must be a built-in type. 3994 QualType 3995 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { 3996 assert(vecType->isBuiltinType() || vecType->isDependentType()); 3997 3998 // Check if we've already instantiated a vector of this type. 3999 llvm::FoldingSetNodeID ID; 4000 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4001 VectorType::GenericVector); 4002 void *InsertPos = nullptr; 4003 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4004 return QualType(VTP, 0); 4005 4006 // If the element type isn't canonical, this won't be a canonical type either, 4007 // so fill in the canonical type field. 4008 QualType Canonical; 4009 if (!vecType.isCanonical()) { 4010 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4011 4012 // Get the new insert position for the node we care about. 
4013 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4014 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4015 } 4016 auto *New = new (*this, TypeAlignment) 4017 ExtVectorType(vecType, NumElts, Canonical); 4018 VectorTypes.InsertNode(New, InsertPos); 4019 Types.push_back(New); 4020 return QualType(New, 0); 4021 } 4022 4023 QualType 4024 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4025 Expr *SizeExpr, 4026 SourceLocation AttrLoc) const { 4027 llvm::FoldingSetNodeID ID; 4028 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4029 SizeExpr); 4030 4031 void *InsertPos = nullptr; 4032 DependentSizedExtVectorType *Canon 4033 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4034 DependentSizedExtVectorType *New; 4035 if (Canon) { 4036 // We already have a canonical version of this array type; use it as 4037 // the canonical type for a newly-built type. 4038 New = new (*this, TypeAlignment) 4039 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 4040 SizeExpr, AttrLoc); 4041 } else { 4042 QualType CanonVecTy = getCanonicalType(vecType); 4043 if (CanonVecTy == vecType) { 4044 New = new (*this, TypeAlignment) 4045 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 4046 AttrLoc); 4047 4048 DependentSizedExtVectorType *CanonCheck 4049 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4050 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4051 (void)CanonCheck; 4052 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4053 } else { 4054 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4055 SourceLocation()); 4056 New = new (*this, TypeAlignment) DependentSizedExtVectorType( 4057 *this, vecType, CanonExtTy, SizeExpr, AttrLoc); 4058 } 4059 } 4060 4061 Types.push_back(New); 4062 return QualType(New, 0); 4063 } 4064 4065 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4066 unsigned NumColumns) const { 4067 llvm::FoldingSetNodeID ID; 4068 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4069 Type::ConstantMatrix); 4070 4071 assert(MatrixType::isValidElementType(ElementTy) && 4072 "need a valid element type"); 4073 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4074 ConstantMatrixType::isDimensionValid(NumColumns) && 4075 "need valid matrix dimensions"); 4076 void *InsertPos = nullptr; 4077 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4078 return QualType(MTP, 0); 4079 4080 QualType Canonical; 4081 if (!ElementTy.isCanonical()) { 4082 Canonical = 4083 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4084 4085 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4086 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4087 (void)NewIP; 4088 } 4089 4090 auto *New = new (*this, TypeAlignment) 4091 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4092 MatrixTypes.InsertNode(New, InsertPos); 4093 Types.push_back(New); 4094 return QualType(New, 0); 4095 } 4096 4097 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4098 Expr *RowExpr, 4099 Expr *ColumnExpr, 4100 SourceLocation AttrLoc) const { 4101 QualType CanonElementTy = getCanonicalType(ElementTy); 4102 llvm::FoldingSetNodeID ID; 4103 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4104 ColumnExpr); 4105 4106 void *InsertPos = nullptr; 4107 DependentSizedMatrixType 
*Canon =
      DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
        *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // Already have a canonical version of the matrix type.
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for the newly-built type.
  DependentSizedMatrixType *New = new (*this, TypeAlignment)
      DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
    DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentAddressSpaceType(*this, canonPointeeType,
                                QualType(), AddrSpaceExpr, AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
                                  AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  QualType Canonical;
  if (!isCanonicalResultType(ResultTy)) {
    Canonical =
      getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);

    // Get the new insert position for the node we care about.
4199 FunctionNoProtoType *NewIP = 4200 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4201 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4202 } 4203 4204 auto *New = new (*this, TypeAlignment) 4205 FunctionNoProtoType(ResultTy, Canonical, Info); 4206 Types.push_back(New); 4207 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4208 return QualType(New, 0); 4209 } 4210 4211 CanQualType 4212 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4213 CanQualType CanResultType = getCanonicalType(ResultType); 4214 4215 // Canonical result types do not have ARC lifetime qualifiers. 4216 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4217 Qualifiers Qs = CanResultType.getQualifiers(); 4218 Qs.removeObjCLifetime(); 4219 return CanQualType::CreateUnsafe( 4220 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4221 } 4222 4223 return CanResultType; 4224 } 4225 4226 static bool isCanonicalExceptionSpecification( 4227 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4228 if (ESI.Type == EST_None) 4229 return true; 4230 if (!NoexceptInType) 4231 return false; 4232 4233 // C++17 onwards: exception specification is part of the type, as a simple 4234 // boolean "can this function type throw". 4235 if (ESI.Type == EST_BasicNoexcept) 4236 return true; 4237 4238 // A noexcept(expr) specification is (possibly) canonical if expr is 4239 // value-dependent. 4240 if (ESI.Type == EST_DependentNoexcept) 4241 return true; 4242 4243 // A dynamic exception specification is canonical if it only contains pack 4244 // expansions (so we can't tell whether it's non-throwing) and all its 4245 // contained types are canonical. 4246 if (ESI.Type == EST_Dynamic) { 4247 bool AnyPackExpansions = false; 4248 for (QualType ET : ESI.Exceptions) { 4249 if (!ET.isCanonical()) 4250 return false; 4251 if (ET->getAs<PackExpansionType>()) 4252 AnyPackExpansions = true; 4253 } 4254 return AnyPackExpansions; 4255 } 4256 4257 return false; 4258 } 4259 4260 QualType ASTContext::getFunctionTypeInternal( 4261 QualType ResultTy, ArrayRef<QualType> ArgArray, 4262 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4263 size_t NumArgs = ArgArray.size(); 4264 4265 // Unique functions, to guarantee there is only one function of a particular 4266 // structure. 4267 llvm::FoldingSetNodeID ID; 4268 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4269 *this, true); 4270 4271 QualType Canonical; 4272 bool Unique = false; 4273 4274 void *InsertPos = nullptr; 4275 if (FunctionProtoType *FPT = 4276 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4277 QualType Existing = QualType(FPT, 0); 4278 4279 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4280 // it so long as our exception specification doesn't contain a dependent 4281 // noexcept expression, or we're just looking for a canonical type. 4282 // Otherwise, we're going to need to create a type 4283 // sugar node to hold the concrete expression. 4284 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4285 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4286 return Existing; 4287 4288 // We need a new type sugar node for this one, to hold the new noexcept 4289 // expression. We do no canonicalization here, but that's OK since we don't 4290 // expect to see the same noexcept expression much more than once. 
4291 Canonical = getCanonicalType(Existing); 4292 Unique = true; 4293 } 4294 4295 bool NoexceptInType = getLangOpts().CPlusPlus17; 4296 bool IsCanonicalExceptionSpec = 4297 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4298 4299 // Determine whether the type being created is already canonical or not. 4300 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4301 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4302 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4303 if (!ArgArray[i].isCanonicalAsParam()) 4304 isCanonical = false; 4305 4306 if (OnlyWantCanonical) 4307 assert(isCanonical && 4308 "given non-canonical parameters constructing canonical type"); 4309 4310 // If this type isn't canonical, get the canonical version of it if we don't 4311 // already have it. The exception spec is only partially part of the 4312 // canonical type, and only in C++17 onwards. 4313 if (!isCanonical && Canonical.isNull()) { 4314 SmallVector<QualType, 16> CanonicalArgs; 4315 CanonicalArgs.reserve(NumArgs); 4316 for (unsigned i = 0; i != NumArgs; ++i) 4317 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4318 4319 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4320 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4321 CanonicalEPI.HasTrailingReturn = false; 4322 4323 if (IsCanonicalExceptionSpec) { 4324 // Exception spec is already OK. 4325 } else if (NoexceptInType) { 4326 switch (EPI.ExceptionSpec.Type) { 4327 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4328 // We don't know yet. It shouldn't matter what we pick here; no-one 4329 // should ever look at this. 4330 LLVM_FALLTHROUGH; 4331 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4332 CanonicalEPI.ExceptionSpec.Type = EST_None; 4333 break; 4334 4335 // A dynamic exception specification is almost always "not noexcept", 4336 // with the exception that a pack expansion might expand to no types. 4337 case EST_Dynamic: { 4338 bool AnyPacks = false; 4339 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4340 if (ET->getAs<PackExpansionType>()) 4341 AnyPacks = true; 4342 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4343 } 4344 if (!AnyPacks) 4345 CanonicalEPI.ExceptionSpec.Type = EST_None; 4346 else { 4347 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4348 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4349 } 4350 break; 4351 } 4352 4353 case EST_DynamicNone: 4354 case EST_BasicNoexcept: 4355 case EST_NoexceptTrue: 4356 case EST_NoThrow: 4357 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4358 break; 4359 4360 case EST_DependentNoexcept: 4361 llvm_unreachable("dependent noexcept is already canonical"); 4362 } 4363 } else { 4364 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4365 } 4366 4367 // Adjust the canonical function result type. 4368 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4369 Canonical = 4370 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4371 4372 // Get the new insert position for the node we care about. 4373 FunctionProtoType *NewIP = 4374 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4375 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4376 } 4377 4378 // Compute the needed size to hold this FunctionProtoType and the 4379 // various trailing objects. 
4380 auto ESH = FunctionProtoType::getExceptionSpecSize( 4381 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4382 size_t Size = FunctionProtoType::totalSizeToAlloc< 4383 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4384 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4385 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4386 NumArgs, EPI.Variadic, 4387 FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type), 4388 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4389 EPI.ExtParameterInfos ? NumArgs : 0, 4390 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4391 4392 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); 4393 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4394 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4395 Types.push_back(FTP); 4396 if (!Unique) 4397 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4398 return QualType(FTP, 0); 4399 } 4400 4401 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4402 llvm::FoldingSetNodeID ID; 4403 PipeType::Profile(ID, T, ReadOnly); 4404 4405 void *InsertPos = nullptr; 4406 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4407 return QualType(PT, 0); 4408 4409 // If the pipe element type isn't canonical, this won't be a canonical type 4410 // either, so fill in the canonical type field. 4411 QualType Canonical; 4412 if (!T.isCanonical()) { 4413 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4414 4415 // Get the new insert position for the node we care about. 4416 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4417 assert(!NewIP && "Shouldn't be in the map!"); 4418 (void)NewIP; 4419 } 4420 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); 4421 Types.push_back(New); 4422 PipeTypes.InsertNode(New, InsertPos); 4423 return QualType(New, 0); 4424 } 4425 4426 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4427 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4428 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4429 : Ty; 4430 } 4431 4432 QualType ASTContext::getReadPipeType(QualType T) const { 4433 return getPipeType(T, true); 4434 } 4435 4436 QualType ASTContext::getWritePipeType(QualType T) const { 4437 return getPipeType(T, false); 4438 } 4439 4440 QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const { 4441 llvm::FoldingSetNodeID ID; 4442 ExtIntType::Profile(ID, IsUnsigned, NumBits); 4443 4444 void *InsertPos = nullptr; 4445 if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4446 return QualType(EIT, 0); 4447 4448 auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits); 4449 ExtIntTypes.InsertNode(New, InsertPos); 4450 Types.push_back(New); 4451 return QualType(New, 0); 4452 } 4453 4454 QualType ASTContext::getDependentExtIntType(bool IsUnsigned, 4455 Expr *NumBitsExpr) const { 4456 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4457 llvm::FoldingSetNodeID ID; 4458 DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4459 4460 void *InsertPos = nullptr; 4461 if (DependentExtIntType *Existing = 4462 DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4463 return QualType(Existing, 0); 4464 4465 auto *New = new (*this, TypeAlignment) 4466 DependentExtIntType(*this, IsUnsigned, NumBitsExpr); 4467 DependentExtIntTypes.InsertNode(New, InsertPos); 4468 4469 Types.push_back(New); 4470 return QualType(New, 0); 4471 } 4472 4473 #ifndef NDEBUG 4474 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4475 if (!isa<CXXRecordDecl>(D)) return false; 4476 const auto *RD = cast<CXXRecordDecl>(D); 4477 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4478 return true; 4479 if (RD->getDescribedClassTemplate() && 4480 !isa<ClassTemplateSpecializationDecl>(RD)) 4481 return true; 4482 return false; 4483 } 4484 #endif 4485 4486 /// getInjectedClassNameType - Return the unique reference to the 4487 /// injected class name type for the specified templated declaration. 4488 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4489 QualType TST) const { 4490 assert(NeedsInjectedClassNameType(Decl)); 4491 if (Decl->TypeForDecl) { 4492 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4493 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4494 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4495 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4496 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4497 } else { 4498 Type *newType = 4499 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 4500 Decl->TypeForDecl = newType; 4501 Types.push_back(newType); 4502 } 4503 return QualType(Decl->TypeForDecl, 0); 4504 } 4505 4506 /// getTypeDeclType - Return the unique reference to the type for the 4507 /// specified type declaration. 
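/// Illustrative sketch (not part of the original source): given a RecordDecl
/// *RD known to an ASTContext `Ctx`, the corresponding type can be obtained
/// through the public entry point declared in the header:
/// \code
///   QualType RecordTy = Ctx.getTypeDeclType(RD);
/// \endcode
/// The result is memoized on the declaration's TypeForDecl field, as the
/// functions below show, so repeated lookups are cheap.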
4508 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4509 assert(Decl && "Passed null for Decl param"); 4510 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4511 4512 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4513 return getTypedefType(Typedef); 4514 4515 assert(!isa<TemplateTypeParmDecl>(Decl) && 4516 "Template type parameter types are always available."); 4517 4518 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4519 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4520 assert(!NeedsInjectedClassNameType(Record)); 4521 return getRecordType(Record); 4522 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4523 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4524 return getEnumType(Enum); 4525 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4526 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using); 4527 Decl->TypeForDecl = newType; 4528 Types.push_back(newType); 4529 } else 4530 llvm_unreachable("TypeDecl without a type?"); 4531 4532 return QualType(Decl->TypeForDecl, 0); 4533 } 4534 4535 /// getTypedefType - Return the unique reference to the type for the 4536 /// specified typedef name decl. 4537 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4538 QualType Underlying) const { 4539 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4540 4541 if (Underlying.isNull()) 4542 Underlying = Decl->getUnderlyingType(); 4543 QualType Canonical = getCanonicalType(Underlying); 4544 auto *newType = new (*this, TypeAlignment) 4545 TypedefType(Type::Typedef, Decl, Underlying, Canonical); 4546 Decl->TypeForDecl = newType; 4547 Types.push_back(newType); 4548 return QualType(newType, 0); 4549 } 4550 4551 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 4552 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4553 4554 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4555 if (PrevDecl->TypeForDecl) 4556 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4557 4558 auto *newType = new (*this, TypeAlignment) RecordType(Decl); 4559 Decl->TypeForDecl = newType; 4560 Types.push_back(newType); 4561 return QualType(newType, 0); 4562 } 4563 4564 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4565 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4566 4567 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4568 if (PrevDecl->TypeForDecl) 4569 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4570 4571 auto *newType = new (*this, TypeAlignment) EnumType(Decl); 4572 Decl->TypeForDecl = newType; 4573 Types.push_back(newType); 4574 return QualType(newType, 0); 4575 } 4576 4577 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4578 QualType modifiedType, 4579 QualType equivalentType) { 4580 llvm::FoldingSetNodeID id; 4581 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4582 4583 void *insertPos = nullptr; 4584 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4585 if (type) return QualType(type, 0); 4586 4587 QualType canon = getCanonicalType(equivalentType); 4588 type = new (*this, TypeAlignment) 4589 AttributedType(canon, attrKind, modifiedType, equivalentType); 4590 4591 Types.push_back(type); 4592 AttributedTypes.InsertNode(type, insertPos); 4593 4594 return QualType(type, 0); 4595 } 4596 4597 /// Retrieve a substitution-result type. 
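/// Illustrative sketch (not part of the original source): substituting a
/// replacement type for a template type parameter. `Parm` is assumed to be a
/// TemplateTypeParmType and `SomeTy` an arbitrary QualType; the replacement
/// must be canonical, hence the getCanonicalType call:
/// \code
///   QualType Substituted =
///       Ctx.getSubstTemplateTypeParmType(Parm, Ctx.getCanonicalType(SomeTy));
/// \endcode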
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
                                         QualType Replacement) const {
  assert(Replacement.isCanonical()
         && "replacement types must always be canonical");

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm
    = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    SubstParm = new (*this, TypeAlignment)
      SubstTemplateTypeParmType(Parm, Replacement);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}

/// Retrieve the type produced by substituting the given template argument
/// pack for the given template type parameter pack.
QualType ASTContext::getSubstTemplateTypeParmPackType(
                                          const TemplateTypeParmType *Parm,
                                              const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements()) {
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
    assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
  }
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm
        = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  if (!Parm->isCanonicalUnqualified()) {
    Canon = getCanonicalType(QualType(Parm, 0));
    Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
                                             ArgPack);
    SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  auto *SubstParm
    = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
                                                               ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}

/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
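/// Illustrative sketch (not part of the original source): requesting the type
/// of the first parameter of the innermost template level without supplying a
/// declaration, assuming `Ctx` is an ASTContext:
/// \code
///   QualType T = Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
///                                            /*ParameterPack=*/false);
/// \endcode
/// When no TemplateTypeParmDecl is supplied, the returned type is itself the
/// canonical template type parameter type for that depth and index.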
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);

    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}

TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                        const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          const TemplateArgumentListInfo &Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args.arguments())
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}

#ifndef NDEBUG
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  // No pack expansions were found in the argument list.
  return false;
}
#endif

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getTemplateDecl());

  bool IsTypeAlias =
      Template.getAsTemplateDecl() &&
      isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
4752 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4753 "Caller must compute aliased type"); 4754 IsTypeAlias = false; 4755 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4756 } 4757 4758 // Allocate the (non-canonical) template specialization type, but don't 4759 // try to unique it: these types typically have location information that 4760 // we don't unique and don't want to lose. 4761 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4762 sizeof(TemplateArgument) * Args.size() + 4763 (IsTypeAlias? sizeof(QualType) : 0), 4764 TypeAlignment); 4765 auto *Spec 4766 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4767 IsTypeAlias ? Underlying : QualType()); 4768 4769 Types.push_back(Spec); 4770 return QualType(Spec, 0); 4771 } 4772 4773 QualType ASTContext::getCanonicalTemplateSpecializationType( 4774 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4775 assert(!Template.getAsDependentTemplateName() && 4776 "No dependent template names here!"); 4777 4778 // Look through qualified template names. 4779 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4780 Template = TemplateName(QTN->getTemplateDecl()); 4781 4782 // Build the canonical template specialization type. 4783 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4784 SmallVector<TemplateArgument, 4> CanonArgs; 4785 unsigned NumArgs = Args.size(); 4786 CanonArgs.reserve(NumArgs); 4787 for (const TemplateArgument &Arg : Args) 4788 CanonArgs.push_back(getCanonicalTemplateArgument(Arg)); 4789 4790 // Determine whether this canonical template specialization type already 4791 // exists. 4792 llvm::FoldingSetNodeID ID; 4793 TemplateSpecializationType::Profile(ID, CanonTemplate, 4794 CanonArgs, *this); 4795 4796 void *InsertPos = nullptr; 4797 TemplateSpecializationType *Spec 4798 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4799 4800 if (!Spec) { 4801 // Allocate a new canonical template specialization type. 
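    // Note that the node is over-allocated: its TemplateArgument array lives
    // immediately after the TemplateSpecializationType object itself.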
4802 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4803 sizeof(TemplateArgument) * NumArgs), 4804 TypeAlignment); 4805 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 4806 CanonArgs, 4807 QualType(), QualType()); 4808 Types.push_back(Spec); 4809 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 4810 } 4811 4812 assert(Spec->isDependentType() && 4813 "Non-dependent template-id type must have a canonical type"); 4814 return QualType(Spec, 0); 4815 } 4816 4817 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 4818 NestedNameSpecifier *NNS, 4819 QualType NamedType, 4820 TagDecl *OwnedTagDecl) const { 4821 llvm::FoldingSetNodeID ID; 4822 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 4823 4824 void *InsertPos = nullptr; 4825 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4826 if (T) 4827 return QualType(T, 0); 4828 4829 QualType Canon = NamedType; 4830 if (!Canon.isCanonical()) { 4831 Canon = getCanonicalType(NamedType); 4832 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4833 assert(!CheckT && "Elaborated canonical type broken"); 4834 (void)CheckT; 4835 } 4836 4837 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 4838 TypeAlignment); 4839 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 4840 4841 Types.push_back(T); 4842 ElaboratedTypes.InsertNode(T, InsertPos); 4843 return QualType(T, 0); 4844 } 4845 4846 QualType 4847 ASTContext::getParenType(QualType InnerType) const { 4848 llvm::FoldingSetNodeID ID; 4849 ParenType::Profile(ID, InnerType); 4850 4851 void *InsertPos = nullptr; 4852 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 4853 if (T) 4854 return QualType(T, 0); 4855 4856 QualType Canon = InnerType; 4857 if (!Canon.isCanonical()) { 4858 Canon = getCanonicalType(InnerType); 4859 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 4860 assert(!CheckT && "Paren canonical type broken"); 4861 (void)CheckT; 4862 } 4863 4864 T = new (*this, TypeAlignment) ParenType(InnerType, Canon); 4865 Types.push_back(T); 4866 ParenTypes.InsertNode(T, InsertPos); 4867 return QualType(T, 0); 4868 } 4869 4870 QualType 4871 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 4872 const IdentifierInfo *MacroII) const { 4873 QualType Canon = UnderlyingTy; 4874 if (!Canon.isCanonical()) 4875 Canon = getCanonicalType(UnderlyingTy); 4876 4877 auto *newType = new (*this, TypeAlignment) 4878 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 4879 Types.push_back(newType); 4880 return QualType(newType, 0); 4881 } 4882 4883 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 4884 NestedNameSpecifier *NNS, 4885 const IdentifierInfo *Name, 4886 QualType Canon) const { 4887 if (Canon.isNull()) { 4888 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 4889 if (CanonNNS != NNS) 4890 Canon = getDependentNameType(Keyword, CanonNNS, Name); 4891 } 4892 4893 llvm::FoldingSetNodeID ID; 4894 DependentNameType::Profile(ID, Keyword, NNS, Name); 4895 4896 void *InsertPos = nullptr; 4897 DependentNameType *T 4898 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 4899 if (T) 4900 return QualType(T, 0); 4901 4902 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); 4903 Types.push_back(T); 4904 DependentNameTypes.InsertNode(T, InsertPos); 4905 return QualType(T, 0); 4906 } 4907 4908 QualType 4909 ASTContext::getDependentTemplateSpecializationType( 
4910 ElaboratedTypeKeyword Keyword, 4911 NestedNameSpecifier *NNS, 4912 const IdentifierInfo *Name, 4913 const TemplateArgumentListInfo &Args) const { 4914 // TODO: avoid this copy 4915 SmallVector<TemplateArgument, 16> ArgCopy; 4916 for (unsigned I = 0, E = Args.size(); I != E; ++I) 4917 ArgCopy.push_back(Args[I].getArgument()); 4918 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 4919 } 4920 4921 QualType 4922 ASTContext::getDependentTemplateSpecializationType( 4923 ElaboratedTypeKeyword Keyword, 4924 NestedNameSpecifier *NNS, 4925 const IdentifierInfo *Name, 4926 ArrayRef<TemplateArgument> Args) const { 4927 assert((!NNS || NNS->isDependent()) && 4928 "nested-name-specifier must be dependent"); 4929 4930 llvm::FoldingSetNodeID ID; 4931 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 4932 Name, Args); 4933 4934 void *InsertPos = nullptr; 4935 DependentTemplateSpecializationType *T 4936 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4937 if (T) 4938 return QualType(T, 0); 4939 4940 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 4941 4942 ElaboratedTypeKeyword CanonKeyword = Keyword; 4943 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 4944 4945 bool AnyNonCanonArgs = false; 4946 unsigned NumArgs = Args.size(); 4947 SmallVector<TemplateArgument, 16> CanonArgs(NumArgs); 4948 for (unsigned I = 0; I != NumArgs; ++I) { 4949 CanonArgs[I] = getCanonicalTemplateArgument(Args[I]); 4950 if (!CanonArgs[I].structurallyEquals(Args[I])) 4951 AnyNonCanonArgs = true; 4952 } 4953 4954 QualType Canon; 4955 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 4956 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 4957 Name, 4958 CanonArgs); 4959 4960 // Find the insert position again. 4961 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4962 } 4963 4964 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 4965 sizeof(TemplateArgument) * NumArgs), 4966 TypeAlignment); 4967 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 4968 Name, Args, Canon); 4969 Types.push_back(T); 4970 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 4971 return QualType(T, 0); 4972 } 4973 4974 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 4975 TemplateArgument Arg; 4976 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 4977 QualType ArgType = getTypeDeclType(TTP); 4978 if (TTP->isParameterPack()) 4979 ArgType = getPackExpansionType(ArgType, None); 4980 4981 Arg = TemplateArgument(ArgType); 4982 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 4983 QualType T = 4984 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 4985 // For class NTTPs, ensure we include the 'const' so the type matches that 4986 // of a real template argument. 4987 // FIXME: It would be more faithful to model this as something like an 4988 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
4989 if (T->isRecordType()) 4990 T.addConst(); 4991 Expr *E = new (*this) DeclRefExpr( 4992 *this, NTTP, /*enclosing*/ false, T, 4993 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 4994 4995 if (NTTP->isParameterPack()) 4996 E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(), 4997 None); 4998 Arg = TemplateArgument(E); 4999 } else { 5000 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5001 if (TTP->isParameterPack()) 5002 Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>()); 5003 else 5004 Arg = TemplateArgument(TemplateName(TTP)); 5005 } 5006 5007 if (Param->isTemplateParameterPack()) 5008 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5009 5010 return Arg; 5011 } 5012 5013 void 5014 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5015 SmallVectorImpl<TemplateArgument> &Args) { 5016 Args.reserve(Args.size() + Params->size()); 5017 5018 for (NamedDecl *Param : *Params) 5019 Args.push_back(getInjectedTemplateArg(Param)); 5020 } 5021 5022 QualType ASTContext::getPackExpansionType(QualType Pattern, 5023 Optional<unsigned> NumExpansions, 5024 bool ExpectPackInType) { 5025 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5026 "Pack expansions must expand one or more parameter packs"); 5027 5028 llvm::FoldingSetNodeID ID; 5029 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5030 5031 void *InsertPos = nullptr; 5032 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5033 if (T) 5034 return QualType(T, 0); 5035 5036 QualType Canon; 5037 if (!Pattern.isCanonical()) { 5038 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5039 /*ExpectPackInType=*/false); 5040 5041 // Find the insert position again, in case we inserted an element into 5042 // PackExpansionTypes and invalidated our insert position. 5043 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5044 } 5045 5046 T = new (*this, TypeAlignment) 5047 PackExpansionType(Pattern, Canon, NumExpansions); 5048 Types.push_back(T); 5049 PackExpansionTypes.InsertNode(T, InsertPos); 5050 return QualType(T, 0); 5051 } 5052 5053 /// CmpProtocolNames - Comparison predicate for sorting protocols 5054 /// alphabetically. 5055 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5056 ObjCProtocolDecl *const *RHS) { 5057 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5058 } 5059 5060 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5061 if (Protocols.empty()) return true; 5062 5063 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5064 return false; 5065 5066 for (unsigned i = 1; i != Protocols.size(); ++i) 5067 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5068 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5069 return false; 5070 return true; 5071 } 5072 5073 static void 5074 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5075 // Sort protocols, keyed by name. 5076 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5077 5078 // Canonicalize. 5079 for (ObjCProtocolDecl *&P : Protocols) 5080 P = P->getCanonicalDecl(); 5081 5082 // Remove duplicates. 
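  // Note that std::unique only collapses *adjacent* duplicates; the sort and
  // canonicalization above are what make this a complete deduplication.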
5083 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5084 Protocols.erase(ProtocolsEnd, Protocols.end()); 5085 } 5086 5087 QualType ASTContext::getObjCObjectType(QualType BaseType, 5088 ObjCProtocolDecl * const *Protocols, 5089 unsigned NumProtocols) const { 5090 return getObjCObjectType(BaseType, {}, 5091 llvm::makeArrayRef(Protocols, NumProtocols), 5092 /*isKindOf=*/false); 5093 } 5094 5095 QualType ASTContext::getObjCObjectType( 5096 QualType baseType, 5097 ArrayRef<QualType> typeArgs, 5098 ArrayRef<ObjCProtocolDecl *> protocols, 5099 bool isKindOf) const { 5100 // If the base type is an interface and there aren't any protocols or 5101 // type arguments to add, then the interface type will do just fine. 5102 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5103 isa<ObjCInterfaceType>(baseType)) 5104 return baseType; 5105 5106 // Look in the folding set for an existing type. 5107 llvm::FoldingSetNodeID ID; 5108 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5109 void *InsertPos = nullptr; 5110 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5111 return QualType(QT, 0); 5112 5113 // Determine the type arguments to be used for canonicalization, 5114 // which may be explicitly specified here or written on the base 5115 // type. 5116 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5117 if (effectiveTypeArgs.empty()) { 5118 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5119 effectiveTypeArgs = baseObject->getTypeArgs(); 5120 } 5121 5122 // Build the canonical type, which has the canonical base type and a 5123 // sorted-and-uniqued list of protocols and the type arguments 5124 // canonicalized. 5125 QualType canonical; 5126 bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(), 5127 effectiveTypeArgs.end(), 5128 [&](QualType type) { 5129 return type.isCanonical(); 5130 }); 5131 bool protocolsSorted = areSortedAndUniqued(protocols); 5132 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5133 // Determine the canonical type arguments. 5134 ArrayRef<QualType> canonTypeArgs; 5135 SmallVector<QualType, 4> canonTypeArgsVec; 5136 if (!typeArgsAreCanonical) { 5137 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5138 for (auto typeArg : effectiveTypeArgs) 5139 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5140 canonTypeArgs = canonTypeArgsVec; 5141 } else { 5142 canonTypeArgs = effectiveTypeArgs; 5143 } 5144 5145 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5146 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5147 if (!protocolsSorted) { 5148 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5149 SortAndUniqueProtocols(canonProtocolsVec); 5150 canonProtocols = canonProtocolsVec; 5151 } else { 5152 canonProtocols = protocols; 5153 } 5154 5155 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5156 canonProtocols, isKindOf); 5157 5158 // Regenerate InsertPos. 5159 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5160 } 5161 5162 unsigned size = sizeof(ObjCObjectTypeImpl); 5163 size += typeArgs.size() * sizeof(QualType); 5164 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5165 void *mem = Allocate(size, TypeAlignment); 5166 auto *T = 5167 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5168 isKindOf); 5169 5170 Types.push_back(T); 5171 ObjCObjectTypes.InsertNode(T, InsertPos); 5172 return QualType(T, 0); 5173 } 5174 5175 /// Apply Objective-C protocol qualifiers to the given type. 
5176 /// If this is for the canonical type of a type parameter, we can apply 5177 /// protocol qualifiers on the ObjCObjectPointerType. 5178 QualType 5179 ASTContext::applyObjCProtocolQualifiers(QualType type, 5180 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5181 bool allowOnPointerType) const { 5182 hasError = false; 5183 5184 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5185 return getObjCTypeParamType(objT->getDecl(), protocols); 5186 } 5187 5188 // Apply protocol qualifiers to ObjCObjectPointerType. 5189 if (allowOnPointerType) { 5190 if (const auto *objPtr = 5191 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5192 const ObjCObjectType *objT = objPtr->getObjectType(); 5193 // Merge protocol lists and construct ObjCObjectType. 5194 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5195 protocolsVec.append(objT->qual_begin(), 5196 objT->qual_end()); 5197 protocolsVec.append(protocols.begin(), protocols.end()); 5198 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5199 type = getObjCObjectType( 5200 objT->getBaseType(), 5201 objT->getTypeArgsAsWritten(), 5202 protocols, 5203 objT->isKindOfTypeAsWritten()); 5204 return getObjCObjectPointerType(type); 5205 } 5206 } 5207 5208 // Apply protocol qualifiers to ObjCObjectType. 5209 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5210 // FIXME: Check for protocols to which the class type is already 5211 // known to conform. 5212 5213 return getObjCObjectType(objT->getBaseType(), 5214 objT->getTypeArgsAsWritten(), 5215 protocols, 5216 objT->isKindOfTypeAsWritten()); 5217 } 5218 5219 // If the canonical type is ObjCObjectType, ... 5220 if (type->isObjCObjectType()) { 5221 // Silently overwrite any existing protocol qualifiers. 5222 // TODO: determine whether that's the right thing to do. 5223 5224 // FIXME: Check for protocols to which the class type is already 5225 // known to conform. 5226 return getObjCObjectType(type, {}, protocols, false); 5227 } 5228 5229 // id<protocol-list> 5230 if (type->isObjCIdType()) { 5231 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5232 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5233 objPtr->isKindOfType()); 5234 return getObjCObjectPointerType(type); 5235 } 5236 5237 // Class<protocol-list> 5238 if (type->isObjCClassType()) { 5239 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5240 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5241 objPtr->isKindOfType()); 5242 return getObjCObjectPointerType(type); 5243 } 5244 5245 hasError = true; 5246 return type; 5247 } 5248 5249 QualType 5250 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5251 ArrayRef<ObjCProtocolDecl *> protocols) const { 5252 // Look in the folding set for an existing type. 5253 llvm::FoldingSetNodeID ID; 5254 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5255 void *InsertPos = nullptr; 5256 if (ObjCTypeParamType *TypeParam = 5257 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5258 return QualType(TypeParam, 0); 5259 5260 // We canonicalize to the underlying type. 5261 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5262 if (!protocols.empty()) { 5263 // Apply the protocol qualifers. 
5264 bool hasError; 5265 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5266 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5267 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5268 } 5269 5270 unsigned size = sizeof(ObjCTypeParamType); 5271 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5272 void *mem = Allocate(size, TypeAlignment); 5273 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5274 5275 Types.push_back(newType); 5276 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5277 return QualType(newType, 0); 5278 } 5279 5280 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5281 ObjCTypeParamDecl *New) const { 5282 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5283 // Update TypeForDecl after updating TypeSourceInfo. 5284 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5285 SmallVector<ObjCProtocolDecl *, 8> protocols; 5286 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5287 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5288 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5289 } 5290 5291 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5292 /// protocol list adopt all protocols in QT's qualified-id protocol 5293 /// list. 5294 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5295 ObjCInterfaceDecl *IC) { 5296 if (!QT->isObjCQualifiedIdType()) 5297 return false; 5298 5299 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5300 // If both the right and left sides have qualifiers. 5301 for (auto *Proto : OPT->quals()) { 5302 if (!IC->ClassImplementsProtocol(Proto, false)) 5303 return false; 5304 } 5305 return true; 5306 } 5307 return false; 5308 } 5309 5310 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5311 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5312 /// of protocols. 5313 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5314 ObjCInterfaceDecl *IDecl) { 5315 if (!QT->isObjCQualifiedIdType()) 5316 return false; 5317 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5318 if (!OPT) 5319 return false; 5320 if (!IDecl->hasDefinition()) 5321 return false; 5322 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5323 CollectInheritedProtocols(IDecl, InheritedProtocols); 5324 if (InheritedProtocols.empty()) 5325 return false; 5326 // Check that if every protocol in list of id<plist> conforms to a protocol 5327 // of IDecl's, then bridge casting is ok. 5328 bool Conforms = false; 5329 for (auto *Proto : OPT->quals()) { 5330 Conforms = false; 5331 for (auto *PI : InheritedProtocols) { 5332 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5333 Conforms = true; 5334 break; 5335 } 5336 } 5337 if (!Conforms) 5338 break; 5339 } 5340 if (Conforms) 5341 return true; 5342 5343 for (auto *PI : InheritedProtocols) { 5344 // If both the right and left sides have qualifiers. 5345 bool Adopts = false; 5346 for (auto *Proto : OPT->quals()) { 5347 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5348 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5349 break; 5350 } 5351 if (!Adopts) 5352 return false; 5353 } 5354 return true; 5355 } 5356 5357 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5358 /// the given object type. 
5359 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5360 llvm::FoldingSetNodeID ID; 5361 ObjCObjectPointerType::Profile(ID, ObjectT); 5362 5363 void *InsertPos = nullptr; 5364 if (ObjCObjectPointerType *QT = 5365 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5366 return QualType(QT, 0); 5367 5368 // Find the canonical object type. 5369 QualType Canonical; 5370 if (!ObjectT.isCanonical()) { 5371 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5372 5373 // Regenerate InsertPos. 5374 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5375 } 5376 5377 // No match. 5378 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); 5379 auto *QType = 5380 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5381 5382 Types.push_back(QType); 5383 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5384 return QualType(QType, 0); 5385 } 5386 5387 /// getObjCInterfaceType - Return the unique reference to the type for the 5388 /// specified ObjC interface decl. The list of protocols is optional. 5389 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5390 ObjCInterfaceDecl *PrevDecl) const { 5391 if (Decl->TypeForDecl) 5392 return QualType(Decl->TypeForDecl, 0); 5393 5394 if (PrevDecl) { 5395 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5396 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5397 return QualType(PrevDecl->TypeForDecl, 0); 5398 } 5399 5400 // Prefer the definition, if there is one. 5401 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5402 Decl = Def; 5403 5404 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); 5405 auto *T = new (Mem) ObjCInterfaceType(Decl); 5406 Decl->TypeForDecl = T; 5407 Types.push_back(T); 5408 return QualType(T, 0); 5409 } 5410 5411 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5412 /// TypeOfExprType AST's (since expression's are never shared). For example, 5413 /// multiple declarations that refer to "typeof(x)" all contain different 5414 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5415 /// on canonical type's (which are always unique). 5416 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { 5417 TypeOfExprType *toe; 5418 if (tofExpr->isTypeDependent()) { 5419 llvm::FoldingSetNodeID ID; 5420 DependentTypeOfExprType::Profile(ID, *this, tofExpr); 5421 5422 void *InsertPos = nullptr; 5423 DependentTypeOfExprType *Canon 5424 = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5425 if (Canon) { 5426 // We already have a "canonical" version of an identical, dependent 5427 // typeof(expr) type. Use that as our canonical type. 5428 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, 5429 QualType((TypeOfExprType*)Canon, 0)); 5430 } else { 5431 // Build a new, canonical typeof(expr) type. 5432 Canon 5433 = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); 5434 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5435 toe = Canon; 5436 } 5437 } else { 5438 QualType Canonical = getCanonicalType(tofExpr->getType()); 5439 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); 5440 } 5441 Types.push_back(toe); 5442 return QualType(toe, 0); 5443 } 5444 5445 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5446 /// TypeOfType nodes. The only motivation to unique these nodes would be 5447 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5448 /// an issue. 
This doesn't affect the type checker, since it operates 5449 /// on canonical types (which are always unique). 5450 QualType ASTContext::getTypeOfType(QualType tofType) const { 5451 QualType Canonical = getCanonicalType(tofType); 5452 auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); 5453 Types.push_back(tot); 5454 return QualType(tot, 0); 5455 } 5456 5457 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5458 /// nodes. This would never be helpful, since each such type has its own 5459 /// expression, and would not give a significant memory saving, since there 5460 /// is an Expr tree under each such type. 5461 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5462 DecltypeType *dt; 5463 5464 // C++11 [temp.type]p2: 5465 // If an expression e involves a template parameter, decltype(e) denotes a 5466 // unique dependent type. Two such decltype-specifiers refer to the same 5467 // type only if their expressions are equivalent (14.5.6.1). 5468 if (e->isInstantiationDependent()) { 5469 llvm::FoldingSetNodeID ID; 5470 DependentDecltypeType::Profile(ID, *this, e); 5471 5472 void *InsertPos = nullptr; 5473 DependentDecltypeType *Canon 5474 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5475 if (!Canon) { 5476 // Build a new, canonical decltype(expr) type. 5477 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); 5478 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5479 } 5480 dt = new (*this, TypeAlignment) 5481 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5482 } else { 5483 dt = new (*this, TypeAlignment) 5484 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5485 } 5486 Types.push_back(dt); 5487 return QualType(dt, 0); 5488 } 5489 5490 /// getUnaryTransformationType - We don't unique these, since the memory 5491 /// savings are minimal and these are rare. 5492 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5493 QualType UnderlyingType, 5494 UnaryTransformType::UTTKind Kind) 5495 const { 5496 UnaryTransformType *ut = nullptr; 5497 5498 if (BaseType->isDependentType()) { 5499 // Look in the folding set for an existing type. 5500 llvm::FoldingSetNodeID ID; 5501 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5502 5503 void *InsertPos = nullptr; 5504 DependentUnaryTransformType *Canon 5505 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5506 5507 if (!Canon) { 5508 // Build a new, canonical __underlying_type(type) type. 5509 Canon = new (*this, TypeAlignment) 5510 DependentUnaryTransformType(*this, getCanonicalType(BaseType), 5511 Kind); 5512 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5513 } 5514 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5515 QualType(), Kind, 5516 QualType(Canon, 0)); 5517 } else { 5518 QualType CanonType = getCanonicalType(UnderlyingType); 5519 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5520 UnderlyingType, Kind, 5521 CanonType); 5522 } 5523 Types.push_back(ut); 5524 return QualType(ut, 0); 5525 } 5526 5527 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5528 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5529 /// canonical deduced-but-dependent 'auto' type. 
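/// As a minimal sketch (assuming 'Ctx' is an ASTContext instance), the type of
/// 'x' in 'auto x = 0;' after deduction succeeds could be represented as:
///
///   QualType Deduced = Ctx.getAutoType(Ctx.IntTy, AutoTypeKeyword::Auto,
///                                      /*IsDependent=*/false, /*IsPack=*/false,
///                                      /*TypeConstraintConcept=*/nullptr,
///                                      /*TypeConstraintArgs=*/{});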
5530 QualType 5531 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5532 bool IsDependent, bool IsPack, 5533 ConceptDecl *TypeConstraintConcept, 5534 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5535 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5536 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5537 !TypeConstraintConcept && !IsDependent) 5538 return getAutoDeductType(); 5539 5540 // Look in the folding set for an existing type. 5541 void *InsertPos = nullptr; 5542 llvm::FoldingSetNodeID ID; 5543 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5544 TypeConstraintConcept, TypeConstraintArgs); 5545 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5546 return QualType(AT, 0); 5547 5548 void *Mem = Allocate(sizeof(AutoType) + 5549 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5550 TypeAlignment); 5551 auto *AT = new (Mem) AutoType( 5552 DeducedType, Keyword, 5553 (IsDependent ? TypeDependence::DependentInstantiation 5554 : TypeDependence::None) | 5555 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5556 TypeConstraintConcept, TypeConstraintArgs); 5557 Types.push_back(AT); 5558 if (InsertPos) 5559 AutoTypes.InsertNode(AT, InsertPos); 5560 return QualType(AT, 0); 5561 } 5562 5563 /// Return the uniqued reference to the deduced template specialization type 5564 /// which has been deduced to the given type, or to the canonical undeduced 5565 /// such type, or the canonical deduced-but-dependent such type. 5566 QualType ASTContext::getDeducedTemplateSpecializationType( 5567 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5568 // Look in the folding set for an existing type. 5569 void *InsertPos = nullptr; 5570 llvm::FoldingSetNodeID ID; 5571 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5572 IsDependent); 5573 if (DeducedTemplateSpecializationType *DTST = 5574 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5575 return QualType(DTST, 0); 5576 5577 auto *DTST = new (*this, TypeAlignment) 5578 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5579 Types.push_back(DTST); 5580 if (InsertPos) 5581 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5582 return QualType(DTST, 0); 5583 } 5584 5585 /// getAtomicType - Return the uniqued reference to the atomic type for 5586 /// the given value type. 5587 QualType ASTContext::getAtomicType(QualType T) const { 5588 // Unique pointers, to guarantee there is only one pointer of a particular 5589 // structure. 5590 llvm::FoldingSetNodeID ID; 5591 AtomicType::Profile(ID, T); 5592 5593 void *InsertPos = nullptr; 5594 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5595 return QualType(AT, 0); 5596 5597 // If the atomic value type isn't canonical, this won't be a canonical type 5598 // either, so fill in the canonical type field. 5599 QualType Canonical; 5600 if (!T.isCanonical()) { 5601 Canonical = getAtomicType(getCanonicalType(T)); 5602 5603 // Get the new insert position for the node we care about. 5604 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5605 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5606 } 5607 auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 5608 Types.push_back(New); 5609 AtomicTypes.InsertNode(New, InsertPos); 5610 return QualType(New, 0); 5611 } 5612 5613 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 
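/// The pattern is an undeduced, unconstrained 'auto'; it is built lazily on
/// first use and cached in AutoDeductTy.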
5614 QualType ASTContext::getAutoDeductType() const { 5615 if (AutoDeductTy.isNull()) 5616 AutoDeductTy = QualType(new (*this, TypeAlignment) 5617 AutoType(QualType(), AutoTypeKeyword::Auto, 5618 TypeDependence::None, 5619 /*concept*/ nullptr, /*args*/ {}), 5620 0); 5621 return AutoDeductTy; 5622 } 5623 5624 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5625 QualType ASTContext::getAutoRRefDeductType() const { 5626 if (AutoRRefDeductTy.isNull()) 5627 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5628 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5629 return AutoRRefDeductTy; 5630 } 5631 5632 /// getTagDeclType - Return the unique reference to the type for the 5633 /// specified TagDecl (struct/union/class/enum) decl. 5634 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5635 assert(Decl); 5636 // FIXME: What is the design on getTagDeclType when it requires casting 5637 // away const? mutable? 5638 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5639 } 5640 5641 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5642 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5643 /// needs to agree with the definition in <stddef.h>. 5644 CanQualType ASTContext::getSizeType() const { 5645 return getFromTargetType(Target->getSizeType()); 5646 } 5647 5648 /// Return the unique signed counterpart of the integer type 5649 /// corresponding to size_t. 5650 CanQualType ASTContext::getSignedSizeType() const { 5651 return getFromTargetType(Target->getSignedSizeType()); 5652 } 5653 5654 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5655 CanQualType ASTContext::getIntMaxType() const { 5656 return getFromTargetType(Target->getIntMaxType()); 5657 } 5658 5659 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5660 CanQualType ASTContext::getUIntMaxType() const { 5661 return getFromTargetType(Target->getUIntMaxType()); 5662 } 5663 5664 /// getSignedWCharType - Return the type of "signed wchar_t". 5665 /// Used when in C++, as a GCC extension. 5666 QualType ASTContext::getSignedWCharType() const { 5667 // FIXME: derive from "Target" ? 5668 return WCharTy; 5669 } 5670 5671 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5672 /// Used when in C++, as a GCC extension. 5673 QualType ASTContext::getUnsignedWCharType() const { 5674 // FIXME: derive from "Target" ? 5675 return UnsignedIntTy; 5676 } 5677 5678 QualType ASTContext::getIntPtrType() const { 5679 return getFromTargetType(Target->getIntPtrType()); 5680 } 5681 5682 QualType ASTContext::getUIntPtrType() const { 5683 return getCorrespondingUnsignedType(getIntPtrType()); 5684 } 5685 5686 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5687 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 5688 QualType ASTContext::getPointerDiffType() const { 5689 return getFromTargetType(Target->getPtrDiffType(0)); 5690 } 5691 5692 /// Return the unique unsigned counterpart of "ptrdiff_t" 5693 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5694 /// in the definition of %tu format specifier. 5695 QualType ASTContext::getUnsignedPointerDiffType() const { 5696 return getFromTargetType(Target->getUnsignedPtrDiffType(0)); 5697 } 5698 5699 /// Return the unique type for "pid_t" defined in 5700 /// <sys/types.h>. We need this to compute the correct type for vfork(). 
5701 QualType ASTContext::getProcessIDType() const { 5702 return getFromTargetType(Target->getProcessIDType()); 5703 } 5704 5705 //===----------------------------------------------------------------------===// 5706 // Type Operators 5707 //===----------------------------------------------------------------------===// 5708 5709 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5710 // Push qualifiers into arrays, and then discard any remaining 5711 // qualifiers. 5712 T = getCanonicalType(T); 5713 T = getVariableArrayDecayedType(T); 5714 const Type *Ty = T.getTypePtr(); 5715 QualType Result; 5716 if (isa<ArrayType>(Ty)) { 5717 Result = getArrayDecayedType(QualType(Ty,0)); 5718 } else if (isa<FunctionType>(Ty)) { 5719 Result = getPointerType(QualType(Ty, 0)); 5720 } else { 5721 Result = QualType(Ty, 0); 5722 } 5723 5724 return CanQualType::CreateUnsafe(Result); 5725 } 5726 5727 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5728 Qualifiers &quals) { 5729 SplitQualType splitType = type.getSplitUnqualifiedType(); 5730 5731 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5732 // the unqualified desugared type and then drops it on the floor. 5733 // We then have to strip that sugar back off with 5734 // getUnqualifiedDesugaredType(), which is silly. 5735 const auto *AT = 5736 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5737 5738 // If we don't have an array, just use the results in splitType. 5739 if (!AT) { 5740 quals = splitType.Quals; 5741 return QualType(splitType.Ty, 0); 5742 } 5743 5744 // Otherwise, recurse on the array's element type. 5745 QualType elementType = AT->getElementType(); 5746 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 5747 5748 // If that didn't change the element type, AT has no qualifiers, so we 5749 // can just use the results in splitType. 5750 if (elementType == unqualElementType) { 5751 assert(quals.empty()); // from the recursive call 5752 quals = splitType.Quals; 5753 return QualType(splitType.Ty, 0); 5754 } 5755 5756 // Otherwise, add in the qualifiers from the outermost type, then 5757 // build the type back up. 5758 quals.addConsistentQualifiers(splitType.Quals); 5759 5760 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 5761 return getConstantArrayType(unqualElementType, CAT->getSize(), 5762 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 5763 } 5764 5765 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 5766 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 5767 } 5768 5769 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 5770 return getVariableArrayType(unqualElementType, 5771 VAT->getSizeExpr(), 5772 VAT->getSizeModifier(), 5773 VAT->getIndexTypeCVRQualifiers(), 5774 VAT->getBracketsRange()); 5775 } 5776 5777 const auto *DSAT = cast<DependentSizedArrayType>(AT); 5778 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 5779 DSAT->getSizeModifier(), 0, 5780 SourceRange()); 5781 } 5782 5783 /// Attempt to unwrap two types that may both be array types with the same bound 5784 /// (or both be array types of unknown bound) for the purpose of comparing the 5785 /// cv-decomposition of two types per C++ [conv.qual]. 
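/// For example, 'int[3]' and 'const int[3]' unwrap to 'int' and 'const int',
/// whereas 'int[3]' and 'int[4]' are left untouched because their bounds
/// differ.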
5786 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) { 5787 while (true) { 5788 auto *AT1 = getAsArrayType(T1); 5789 if (!AT1) 5790 return; 5791 5792 auto *AT2 = getAsArrayType(T2); 5793 if (!AT2) 5794 return; 5795 5796 // If we don't have two array types with the same constant bound nor two 5797 // incomplete array types, we've unwrapped everything we can. 5798 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 5799 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 5800 if (!CAT2 || CAT1->getSize() != CAT2->getSize()) 5801 return; 5802 } else if (!isa<IncompleteArrayType>(AT1) || 5803 !isa<IncompleteArrayType>(AT2)) { 5804 return; 5805 } 5806 5807 T1 = AT1->getElementType(); 5808 T2 = AT2->getElementType(); 5809 } 5810 } 5811 5812 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 5813 /// 5814 /// If T1 and T2 are both pointer types of the same kind, or both array types 5815 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 5816 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 5817 /// 5818 /// This function will typically be called in a loop that successively 5819 /// "unwraps" pointer and pointer-to-member types to compare them at each 5820 /// level. 5821 /// 5822 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 5823 /// pair of types that can't be unwrapped further. 5824 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) { 5825 UnwrapSimilarArrayTypes(T1, T2); 5826 5827 const auto *T1PtrType = T1->getAs<PointerType>(); 5828 const auto *T2PtrType = T2->getAs<PointerType>(); 5829 if (T1PtrType && T2PtrType) { 5830 T1 = T1PtrType->getPointeeType(); 5831 T2 = T2PtrType->getPointeeType(); 5832 return true; 5833 } 5834 5835 const auto *T1MPType = T1->getAs<MemberPointerType>(); 5836 const auto *T2MPType = T2->getAs<MemberPointerType>(); 5837 if (T1MPType && T2MPType && 5838 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 5839 QualType(T2MPType->getClass(), 0))) { 5840 T1 = T1MPType->getPointeeType(); 5841 T2 = T2MPType->getPointeeType(); 5842 return true; 5843 } 5844 5845 if (getLangOpts().ObjC) { 5846 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 5847 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 5848 if (T1OPType && T2OPType) { 5849 T1 = T1OPType->getPointeeType(); 5850 T2 = T2OPType->getPointeeType(); 5851 return true; 5852 } 5853 } 5854 5855 // FIXME: Block pointers, too? 
5856 5857 return false; 5858 } 5859 5860 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 5861 while (true) { 5862 Qualifiers Quals; 5863 T1 = getUnqualifiedArrayType(T1, Quals); 5864 T2 = getUnqualifiedArrayType(T2, Quals); 5865 if (hasSameType(T1, T2)) 5866 return true; 5867 if (!UnwrapSimilarTypes(T1, T2)) 5868 return false; 5869 } 5870 } 5871 5872 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 5873 while (true) { 5874 Qualifiers Quals1, Quals2; 5875 T1 = getUnqualifiedArrayType(T1, Quals1); 5876 T2 = getUnqualifiedArrayType(T2, Quals2); 5877 5878 Quals1.removeCVRQualifiers(); 5879 Quals2.removeCVRQualifiers(); 5880 if (Quals1 != Quals2) 5881 return false; 5882 5883 if (hasSameType(T1, T2)) 5884 return true; 5885 5886 if (!UnwrapSimilarTypes(T1, T2)) 5887 return false; 5888 } 5889 } 5890 5891 DeclarationNameInfo 5892 ASTContext::getNameForTemplate(TemplateName Name, 5893 SourceLocation NameLoc) const { 5894 switch (Name.getKind()) { 5895 case TemplateName::QualifiedTemplate: 5896 case TemplateName::Template: 5897 // DNInfo work in progress: CHECKME: what about DNLoc? 5898 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 5899 NameLoc); 5900 5901 case TemplateName::OverloadedTemplate: { 5902 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 5903 // DNInfo work in progress: CHECKME: what about DNLoc? 5904 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 5905 } 5906 5907 case TemplateName::AssumedTemplate: { 5908 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 5909 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 5910 } 5911 5912 case TemplateName::DependentTemplate: { 5913 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 5914 DeclarationName DName; 5915 if (DTN->isIdentifier()) { 5916 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 5917 return DeclarationNameInfo(DName, NameLoc); 5918 } else { 5919 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 5920 // DNInfo work in progress: FIXME: source locations? 5921 DeclarationNameLoc DNLoc = 5922 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 5923 return DeclarationNameInfo(DName, NameLoc, DNLoc); 5924 } 5925 } 5926 5927 case TemplateName::SubstTemplateTemplateParm: { 5928 SubstTemplateTemplateParmStorage *subst 5929 = Name.getAsSubstTemplateTemplateParm(); 5930 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 5931 NameLoc); 5932 } 5933 5934 case TemplateName::SubstTemplateTemplateParmPack: { 5935 SubstTemplateTemplateParmPackStorage *subst 5936 = Name.getAsSubstTemplateTemplateParmPack(); 5937 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 5938 NameLoc); 5939 } 5940 } 5941 5942 llvm_unreachable("bad template name kind!"); 5943 } 5944 5945 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { 5946 switch (Name.getKind()) { 5947 case TemplateName::QualifiedTemplate: 5948 case TemplateName::Template: { 5949 TemplateDecl *Template = Name.getAsTemplateDecl(); 5950 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 5951 Template = getCanonicalTemplateTemplateParmDecl(TTP); 5952 5953 // The canonical template name is the canonical template declaration. 
5954 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 5955 } 5956 5957 case TemplateName::OverloadedTemplate: 5958 case TemplateName::AssumedTemplate: 5959 llvm_unreachable("cannot canonicalize unresolved template"); 5960 5961 case TemplateName::DependentTemplate: { 5962 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 5963 assert(DTN && "Non-dependent template names must refer to template decls."); 5964 return DTN->CanonicalTemplateName; 5965 } 5966 5967 case TemplateName::SubstTemplateTemplateParm: { 5968 SubstTemplateTemplateParmStorage *subst 5969 = Name.getAsSubstTemplateTemplateParm(); 5970 return getCanonicalTemplateName(subst->getReplacement()); 5971 } 5972 5973 case TemplateName::SubstTemplateTemplateParmPack: { 5974 SubstTemplateTemplateParmPackStorage *subst 5975 = Name.getAsSubstTemplateTemplateParmPack(); 5976 TemplateTemplateParmDecl *canonParameter 5977 = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); 5978 TemplateArgument canonArgPack 5979 = getCanonicalTemplateArgument(subst->getArgumentPack()); 5980 return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); 5981 } 5982 } 5983 5984 llvm_unreachable("bad template name!"); 5985 } 5986 5987 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { 5988 X = getCanonicalTemplateName(X); 5989 Y = getCanonicalTemplateName(Y); 5990 return X.getAsVoidPointer() == Y.getAsVoidPointer(); 5991 } 5992 5993 TemplateArgument 5994 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 5995 switch (Arg.getKind()) { 5996 case TemplateArgument::Null: 5997 return Arg; 5998 5999 case TemplateArgument::Expression: 6000 return Arg; 6001 6002 case TemplateArgument::Declaration: { 6003 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6004 return TemplateArgument(D, Arg.getParamTypeForDecl()); 6005 } 6006 6007 case TemplateArgument::NullPtr: 6008 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6009 /*isNullPtr*/true); 6010 6011 case TemplateArgument::Template: 6012 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); 6013 6014 case TemplateArgument::TemplateExpansion: 6015 return TemplateArgument(getCanonicalTemplateName( 6016 Arg.getAsTemplateOrTemplatePattern()), 6017 Arg.getNumTemplateExpansions()); 6018 6019 case TemplateArgument::Integral: 6020 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6021 6022 case TemplateArgument::Type: 6023 return TemplateArgument(getCanonicalType(Arg.getAsType())); 6024 6025 case TemplateArgument::Pack: { 6026 if (Arg.pack_size() == 0) 6027 return Arg; 6028 6029 auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()]; 6030 unsigned Idx = 0; 6031 for (TemplateArgument::pack_iterator A = Arg.pack_begin(), 6032 AEnd = Arg.pack_end(); 6033 A != AEnd; (void)++A, ++Idx) 6034 CanonArgs[Idx] = getCanonicalTemplateArgument(*A); 6035 6036 return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size())); 6037 } 6038 } 6039 6040 // Silence GCC warning 6041 llvm_unreachable("Unhandled template argument kind"); 6042 } 6043 6044 NestedNameSpecifier * 6045 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6046 if (!NNS) 6047 return nullptr; 6048 6049 switch (NNS->getKind()) { 6050 case NestedNameSpecifier::Identifier: 6051 // Canonicalize the prefix but keep the identifier the same. 
6052 return NestedNameSpecifier::Create(*this, 6053 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6054 NNS->getAsIdentifier()); 6055 6056 case NestedNameSpecifier::Namespace: 6057 // A namespace is canonical; build a nested-name-specifier with 6058 // this namespace and no prefix. 6059 return NestedNameSpecifier::Create(*this, nullptr, 6060 NNS->getAsNamespace()->getOriginalNamespace()); 6061 6062 case NestedNameSpecifier::NamespaceAlias: 6063 // A namespace is canonical; build a nested-name-specifier with 6064 // this namespace and no prefix. 6065 return NestedNameSpecifier::Create(*this, nullptr, 6066 NNS->getAsNamespaceAlias()->getNamespace() 6067 ->getOriginalNamespace()); 6068 6069 // The difference between TypeSpec and TypeSpecWithTemplate is that the 6070 // latter will have the 'template' keyword when printed. 6071 case NestedNameSpecifier::TypeSpec: 6072 case NestedNameSpecifier::TypeSpecWithTemplate: { 6073 const Type *T = getCanonicalType(NNS->getAsType()); 6074 6075 // If we have some kind of dependent-named type (e.g., "typename T::type"), 6076 // break it apart into its prefix and identifier, then reconsititute those 6077 // as the canonical nested-name-specifier. This is required to canonicalize 6078 // a dependent nested-name-specifier involving typedefs of dependent-name 6079 // types, e.g., 6080 // typedef typename T::type T1; 6081 // typedef typename T1::type T2; 6082 if (const auto *DNT = T->getAs<DependentNameType>()) 6083 return NestedNameSpecifier::Create( 6084 *this, DNT->getQualifier(), 6085 const_cast<IdentifierInfo *>(DNT->getIdentifier())); 6086 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) 6087 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, 6088 const_cast<Type *>(T)); 6089 6090 // TODO: Set 'Template' parameter to true for other template types. 6091 return NestedNameSpecifier::Create(*this, nullptr, false, 6092 const_cast<Type *>(T)); 6093 } 6094 6095 case NestedNameSpecifier::Global: 6096 case NestedNameSpecifier::Super: 6097 // The global specifier and __super specifer are canonical and unique. 6098 return NNS; 6099 } 6100 6101 llvm_unreachable("Invalid NestedNameSpecifier::Kind!"); 6102 } 6103 6104 const ArrayType *ASTContext::getAsArrayType(QualType T) const { 6105 // Handle the non-qualified case efficiently. 6106 if (!T.hasLocalQualifiers()) { 6107 // Handle the common positive case fast. 6108 if (const auto *AT = dyn_cast<ArrayType>(T)) 6109 return AT; 6110 } 6111 6112 // Handle the common negative case fast. 6113 if (!isa<ArrayType>(T.getCanonicalType())) 6114 return nullptr; 6115 6116 // Apply any qualifiers from the array type to the element type. This 6117 // implements C99 6.7.3p8: "If the specification of an array type includes 6118 // any type qualifiers, the element type is so qualified, not the array type." 6119 6120 // If we get here, we either have type qualifiers on the type, or we have 6121 // sugar such as a typedef in the way. If we have type qualifiers on the type 6122 // we must propagate them down into the element type. 6123 6124 SplitQualType split = T.getSplitDesugaredType(); 6125 Qualifiers qs = split.Quals; 6126 6127 // If we have a simple case, just return now. 6128 const auto *ATy = dyn_cast<ArrayType>(split.Ty); 6129 if (!ATy || qs.empty()) 6130 return ATy; 6131 6132 // Otherwise, we have an array and we have qualifiers on it. Push the 6133 // qualifiers into the array element type and return a new array type. 
6134 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6135 6136 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6137 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6138 CAT->getSizeExpr(), 6139 CAT->getSizeModifier(), 6140 CAT->getIndexTypeCVRQualifiers())); 6141 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6142 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6143 IAT->getSizeModifier(), 6144 IAT->getIndexTypeCVRQualifiers())); 6145 6146 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6147 return cast<ArrayType>( 6148 getDependentSizedArrayType(NewEltTy, 6149 DSAT->getSizeExpr(), 6150 DSAT->getSizeModifier(), 6151 DSAT->getIndexTypeCVRQualifiers(), 6152 DSAT->getBracketsRange())); 6153 6154 const auto *VAT = cast<VariableArrayType>(ATy); 6155 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6156 VAT->getSizeExpr(), 6157 VAT->getSizeModifier(), 6158 VAT->getIndexTypeCVRQualifiers(), 6159 VAT->getBracketsRange())); 6160 } 6161 6162 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6163 if (T->isArrayType() || T->isFunctionType()) 6164 return getDecayedType(T); 6165 return T; 6166 } 6167 6168 QualType ASTContext::getSignatureParameterType(QualType T) const { 6169 T = getVariableArrayDecayedType(T); 6170 T = getAdjustedParameterType(T); 6171 return T.getUnqualifiedType(); 6172 } 6173 6174 QualType ASTContext::getExceptionObjectType(QualType T) const { 6175 // C++ [except.throw]p3: 6176 // A throw-expression initializes a temporary object, called the exception 6177 // object, the type of which is determined by removing any top-level 6178 // cv-qualifiers from the static type of the operand of throw and adjusting 6179 // the type from "array of T" or "function returning T" to "pointer to T" 6180 // or "pointer to function returning T", [...] 6181 T = getVariableArrayDecayedType(T); 6182 if (T->isArrayType() || T->isFunctionType()) 6183 T = getDecayedType(T); 6184 return T.getUnqualifiedType(); 6185 } 6186 6187 /// getArrayDecayedType - Return the properly qualified result of decaying the 6188 /// specified array type to a pointer. This operation is non-trivial when 6189 /// handling typedefs etc. The canonical type of "T" must be an array type, 6190 /// this returns a pointer to a properly qualified element of the array. 6191 /// 6192 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6193 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6194 // Get the element type with 'getAsArrayType' so that we don't lose any 6195 // typedefs in the element type of the array. This also handles propagation 6196 // of type qualifiers from the array type into the element type if present 6197 // (C99 6.7.3p8). 
6198 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6199 assert(PrettyArrayType && "Not an array type!"); 6200 6201 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6202 6203 // int x[restrict 4] -> int *restrict 6204 QualType Result = getQualifiedType(PtrTy, 6205 PrettyArrayType->getIndexTypeQualifiers()); 6206 6207 // int x[_Nullable] -> int * _Nullable 6208 if (auto Nullability = Ty->getNullability(*this)) { 6209 Result = const_cast<ASTContext *>(this)->getAttributedType( 6210 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6211 } 6212 return Result; 6213 } 6214 6215 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6216 return getBaseElementType(array->getElementType()); 6217 } 6218 6219 QualType ASTContext::getBaseElementType(QualType type) const { 6220 Qualifiers qs; 6221 while (true) { 6222 SplitQualType split = type.getSplitDesugaredType(); 6223 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6224 if (!array) break; 6225 6226 type = array->getElementType(); 6227 qs.addConsistentQualifiers(split.Quals); 6228 } 6229 6230 return getQualifiedType(type, qs); 6231 } 6232 6233 /// getConstantArrayElementCount - Returns number of constant array elements. 6234 uint64_t 6235 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6236 uint64_t ElementCount = 1; 6237 do { 6238 ElementCount *= CA->getSize().getZExtValue(); 6239 CA = dyn_cast_or_null<ConstantArrayType>( 6240 CA->getElementType()->getAsArrayTypeUnsafe()); 6241 } while (CA); 6242 return ElementCount; 6243 } 6244 6245 /// getFloatingRank - Return a relative rank for floating point types. 6246 /// This routine will assert if passed a built-in type that isn't a float. 6247 static FloatingRank getFloatingRank(QualType T) { 6248 if (const auto *CT = T->getAs<ComplexType>()) 6249 return getFloatingRank(CT->getElementType()); 6250 6251 switch (T->castAs<BuiltinType>()->getKind()) { 6252 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6253 case BuiltinType::Float16: return Float16Rank; 6254 case BuiltinType::Half: return HalfRank; 6255 case BuiltinType::Float: return FloatRank; 6256 case BuiltinType::Double: return DoubleRank; 6257 case BuiltinType::LongDouble: return LongDoubleRank; 6258 case BuiltinType::Float128: return Float128Rank; 6259 case BuiltinType::BFloat16: return BFloat16Rank; 6260 } 6261 } 6262 6263 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating 6264 /// point or a complex type (based on typeDomain/typeSize). 6265 /// 'typeDomain' is a real floating point or complex type. 6266 /// 'typeSize' is a real floating point or complex type. 
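/// For example, a size of 'float' within the domain '_Complex double' yields
/// '_Complex float', while a size of 'double' within the real domain
/// 'long double' yields plain 'double'.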
6267 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, 6268 QualType Domain) const { 6269 FloatingRank EltRank = getFloatingRank(Size); 6270 if (Domain->isComplexType()) { 6271 switch (EltRank) { 6272 case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported"); 6273 case Float16Rank: 6274 case HalfRank: llvm_unreachable("Complex half is not supported"); 6275 case FloatRank: return FloatComplexTy; 6276 case DoubleRank: return DoubleComplexTy; 6277 case LongDoubleRank: return LongDoubleComplexTy; 6278 case Float128Rank: return Float128ComplexTy; 6279 } 6280 } 6281 6282 assert(Domain->isRealFloatingType() && "Unknown domain!"); 6283 switch (EltRank) { 6284 case Float16Rank: return HalfTy; 6285 case BFloat16Rank: return BFloat16Ty; 6286 case HalfRank: return HalfTy; 6287 case FloatRank: return FloatTy; 6288 case DoubleRank: return DoubleTy; 6289 case LongDoubleRank: return LongDoubleTy; 6290 case Float128Rank: return Float128Ty; 6291 } 6292 llvm_unreachable("getFloatingRank(): illegal value for rank"); 6293 } 6294 6295 /// getFloatingTypeOrder - Compare the rank of the two specified floating 6296 /// point types, ignoring the domain of the type (i.e. 'double' == 6297 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 6298 /// LHS < RHS, return -1. 6299 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 6300 FloatingRank LHSR = getFloatingRank(LHS); 6301 FloatingRank RHSR = getFloatingRank(RHS); 6302 6303 if (LHSR == RHSR) 6304 return 0; 6305 if (LHSR > RHSR) 6306 return 1; 6307 return -1; 6308 } 6309 6310 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 6311 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 6312 return 0; 6313 return getFloatingTypeOrder(LHS, RHS); 6314 } 6315 6316 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 6317 /// routine will assert if passed a built-in type that isn't an integer or enum, 6318 /// or if it is not canonicalized. 6319 unsigned ASTContext::getIntegerRank(const Type *T) const { 6320 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 6321 6322 // Results in this 'losing' to any type of the same size, but winning if 6323 // larger. 6324 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 6325 return 0 + (EIT->getNumBits() << 3); 6326 6327 switch (cast<BuiltinType>(T)->getKind()) { 6328 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 6329 case BuiltinType::Bool: 6330 return 1 + (getIntWidth(BoolTy) << 3); 6331 case BuiltinType::Char_S: 6332 case BuiltinType::Char_U: 6333 case BuiltinType::SChar: 6334 case BuiltinType::UChar: 6335 return 2 + (getIntWidth(CharTy) << 3); 6336 case BuiltinType::Short: 6337 case BuiltinType::UShort: 6338 return 3 + (getIntWidth(ShortTy) << 3); 6339 case BuiltinType::Int: 6340 case BuiltinType::UInt: 6341 return 4 + (getIntWidth(IntTy) << 3); 6342 case BuiltinType::Long: 6343 case BuiltinType::ULong: 6344 return 5 + (getIntWidth(LongTy) << 3); 6345 case BuiltinType::LongLong: 6346 case BuiltinType::ULongLong: 6347 return 6 + (getIntWidth(LongLongTy) << 3); 6348 case BuiltinType::Int128: 6349 case BuiltinType::UInt128: 6350 return 7 + (getIntWidth(Int128Ty) << 3); 6351 } 6352 } 6353 6354 /// Whether this is a promotable bitfield reference according 6355 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 6356 /// 6357 /// \returns the type this bit-field will promote to, or NULL if no 6358 /// promotion occurs. 
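///
/// For example, given 'struct S { int b : 3; unsigned u : 32; };' on a target
/// where int is 32 bits wide, the bit-field 'b' promotes to 'int' and 'u'
/// promotes to 'unsigned int'; a bit-field wider than int does not promote.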
6359 QualType ASTContext::isPromotableBitField(Expr *E) const { 6360 if (E->isTypeDependent() || E->isValueDependent()) 6361 return {}; 6362 6363 // C++ [conv.prom]p5: 6364 // If the bit-field has an enumerated type, it is treated as any other 6365 // value of that type for promotion purposes. 6366 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 6367 return {}; 6368 6369 // FIXME: We should not do this unless E->refersToBitField() is true. This 6370 // matters in C where getSourceBitField() will find bit-fields for various 6371 // cases where the source expression is not a bit-field designator. 6372 6373 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 6374 if (!Field) 6375 return {}; 6376 6377 QualType FT = Field->getType(); 6378 6379 uint64_t BitWidth = Field->getBitWidthValue(*this); 6380 uint64_t IntSize = getTypeSize(IntTy); 6381 // C++ [conv.prom]p5: 6382 // A prvalue for an integral bit-field can be converted to a prvalue of type 6383 // int if int can represent all the values of the bit-field; otherwise, it 6384 // can be converted to unsigned int if unsigned int can represent all the 6385 // values of the bit-field. If the bit-field is larger yet, no integral 6386 // promotion applies to it. 6387 // C11 6.3.1.1/2: 6388 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 6389 // If an int can represent all values of the original type (as restricted by 6390 // the width, for a bit-field), the value is converted to an int; otherwise, 6391 // it is converted to an unsigned int. 6392 // 6393 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 6394 // We perform that promotion here to match GCC and C++. 6395 // FIXME: C does not permit promotion of an enum bit-field whose rank is 6396 // greater than that of 'int'. We perform that promotion to match GCC. 6397 if (BitWidth < IntSize) 6398 return IntTy; 6399 6400 if (BitWidth == IntSize) 6401 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 6402 6403 // Bit-fields wider than int are not subject to promotions, and therefore act 6404 // like the base type. GCC has some weird bugs in this area that we 6405 // deliberately do not follow (GCC follows a pre-standard resolution to 6406 // C's DR315 which treats bit-width as being part of the type, and this leaks 6407 // into their semantics in some cases). 6408 return {}; 6409 } 6410 6411 /// getPromotedIntegerType - Returns the type that Promotable will 6412 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 6413 /// integer type. 6414 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 6415 assert(!Promotable.isNull()); 6416 assert(Promotable->isPromotableIntegerType()); 6417 if (const auto *ET = Promotable->getAs<EnumType>()) 6418 return ET->getDecl()->getPromotionType(); 6419 6420 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 6421 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 6422 // (3.9.1) can be converted to a prvalue of the first of the following 6423 // types that can represent all the values of its underlying type: 6424 // int, unsigned int, long int, unsigned long int, long long int, or 6425 // unsigned long long int [...] 6426 // FIXME: Is there some better way to compute this? 
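    // For example, on a target where char16_t is 16 bits and int is 32 bits,
    // char16_t promotes to int (the first listed type wide enough to hold all
    // of its values); a 32-bit unsigned wchar_t instead promotes to unsigned
    // int, because a 32-bit signed int cannot represent all of its values.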
6427 if (BT->getKind() == BuiltinType::WChar_S || 6428 BT->getKind() == BuiltinType::WChar_U || 6429 BT->getKind() == BuiltinType::Char8 || 6430 BT->getKind() == BuiltinType::Char16 || 6431 BT->getKind() == BuiltinType::Char32) { 6432 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 6433 uint64_t FromSize = getTypeSize(BT); 6434 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 6435 LongLongTy, UnsignedLongLongTy }; 6436 for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { 6437 uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); 6438 if (FromSize < ToSize || 6439 (FromSize == ToSize && 6440 FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) 6441 return PromoteTypes[Idx]; 6442 } 6443 llvm_unreachable("char type should fit into long long"); 6444 } 6445 } 6446 6447 // At this point, we should have a signed or unsigned integer type. 6448 if (Promotable->isSignedIntegerType()) 6449 return IntTy; 6450 uint64_t PromotableSize = getIntWidth(Promotable); 6451 uint64_t IntSize = getIntWidth(IntTy); 6452 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 6453 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 6454 } 6455 6456 /// Recurses in pointer/array types until it finds an objc retainable 6457 /// type and returns its ownership. 6458 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 6459 while (!T.isNull()) { 6460 if (T.getObjCLifetime() != Qualifiers::OCL_None) 6461 return T.getObjCLifetime(); 6462 if (T->isArrayType()) 6463 T = getBaseElementType(T); 6464 else if (const auto *PT = T->getAs<PointerType>()) 6465 T = PT->getPointeeType(); 6466 else if (const auto *RT = T->getAs<ReferenceType>()) 6467 T = RT->getPointeeType(); 6468 else 6469 break; 6470 } 6471 6472 return Qualifiers::OCL_None; 6473 } 6474 6475 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 6476 // Incomplete enum types are not treated as integer types. 6477 // FIXME: In C++, enum types are never integer types. 6478 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 6479 return ET->getDecl()->getIntegerType().getTypePtr(); 6480 return nullptr; 6481 } 6482 6483 /// getIntegerTypeOrder - Returns the highest ranked integer type: 6484 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 6485 /// LHS < RHS, return -1. 6486 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 6487 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 6488 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 6489 6490 // Unwrap enums to their underlying type. 6491 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 6492 LHSC = getIntegerTypeForEnum(ET); 6493 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 6494 RHSC = getIntegerTypeForEnum(ET); 6495 6496 if (LHSC == RHSC) return 0; 6497 6498 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 6499 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 6500 6501 unsigned LHSRank = getIntegerRank(LHSC); 6502 unsigned RHSRank = getIntegerRank(RHSC); 6503 6504 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 6505 if (LHSRank == RHSRank) return 0; 6506 return LHSRank > RHSRank ? 1 : -1; 6507 } 6508 6509 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 6510 if (LHSUnsigned) { 6511 // If the unsigned [LHS] type is larger, return it. 6512 if (LHSRank >= RHSRank) 6513 return 1; 6514 6515 // If the signed type can represent all values of the unsigned type, it 6516 // wins. 
    // wins. Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins. Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}

TypedefDecl *ASTContext::getCFConstantStringDecl() const {
  if (CFConstantStringTypeDecl)
    return CFConstantStringTypeDecl;

  assert(!CFConstantStringTagDecl &&
         "tag and typedef should be initialized together");
  CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
  CFConstantStringTagDecl->startDefinition();

  struct {
    QualType Type;
    const char *Name;
  } Fields[5];
  unsigned Count = 0;

  /// Objective-C ABI
  ///
  ///    typedef struct __NSConstantString_tag {
  ///      const int *isa;
  ///      int flags;
  ///      const char *str;
  ///      long length;
  ///    } __NSConstantString;
  ///
  /// Swift ABI (4.1, 4.2)
  ///
  ///    typedef struct __NSConstantString_tag {
  ///      uintptr_t _cfisa;
  ///      uintptr_t _swift_rc;
  ///      _Atomic(uint64_t) _cfinfoa;
  ///      const char *_ptr;
  ///      uint32_t _length;
  ///    } __NSConstantString;
  ///
  /// Swift ABI (5.0)
  ///
  ///    typedef struct __NSConstantString_tag {
  ///      uintptr_t _cfisa;
  ///      uintptr_t _swift_rc;
  ///      _Atomic(uint64_t) _cfinfoa;
  ///      const char *_ptr;
  ///      uintptr_t _length;
  ///    } __NSConstantString;

  const auto CFRuntime = getLangOpts().CFRuntime;
  if (static_cast<unsigned>(CFRuntime) <
      static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
    Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
    Fields[Count++] = { IntTy, "flags" };
    Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
    Fields[Count++] = { LongTy, "length" };
  } else {
    Fields[Count++] = { getUIntPtrType(), "_cfisa" };
    Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
    Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" };
    Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
    if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
        CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
      Fields[Count++] = { IntTy, "_length" };
    else
      Fields[Count++] = { getUIntPtrType(), "_length" };
  }

  // Create fields
  for (unsigned i = 0; i < Count; ++i) {
    FieldDecl *Field =
        FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
                          SourceLocation(), &Idents.get(Fields[i].Name),
                          Fields[i].Type, /*TInfo=*/nullptr,
                          /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    CFConstantStringTagDecl->addDecl(Field);
  }

  CFConstantStringTagDecl->completeDefinition();
  // This type is designed to be compatible with NSConstantString, but cannot
  // use the same name, since NSConstantString is an interface.
6608 auto tagType = getTagDeclType(CFConstantStringTagDecl); 6609 CFConstantStringTypeDecl = 6610 buildImplicitTypedef(tagType, "__NSConstantString"); 6611 6612 return CFConstantStringTypeDecl; 6613 } 6614 6615 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 6616 if (!CFConstantStringTagDecl) 6617 getCFConstantStringDecl(); // Build the tag and the typedef. 6618 return CFConstantStringTagDecl; 6619 } 6620 6621 // getCFConstantStringType - Return the type used for constant CFStrings. 6622 QualType ASTContext::getCFConstantStringType() const { 6623 return getTypedefType(getCFConstantStringDecl()); 6624 } 6625 6626 QualType ASTContext::getObjCSuperType() const { 6627 if (ObjCSuperType.isNull()) { 6628 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 6629 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 6630 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 6631 } 6632 return ObjCSuperType; 6633 } 6634 6635 void ASTContext::setCFConstantStringType(QualType T) { 6636 const auto *TD = T->castAs<TypedefType>(); 6637 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 6638 const auto *TagType = 6639 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 6640 CFConstantStringTagDecl = TagType->getDecl(); 6641 } 6642 6643 QualType ASTContext::getBlockDescriptorType() const { 6644 if (BlockDescriptorType) 6645 return getTagDeclType(BlockDescriptorType); 6646 6647 RecordDecl *RD; 6648 // FIXME: Needs the FlagAppleBlock bit. 6649 RD = buildImplicitRecord("__block_descriptor"); 6650 RD->startDefinition(); 6651 6652 QualType FieldTypes[] = { 6653 UnsignedLongTy, 6654 UnsignedLongTy, 6655 }; 6656 6657 static const char *const FieldNames[] = { 6658 "reserved", 6659 "Size" 6660 }; 6661 6662 for (size_t i = 0; i < 2; ++i) { 6663 FieldDecl *Field = FieldDecl::Create( 6664 *this, RD, SourceLocation(), SourceLocation(), 6665 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 6666 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 6667 Field->setAccess(AS_public); 6668 RD->addDecl(Field); 6669 } 6670 6671 RD->completeDefinition(); 6672 6673 BlockDescriptorType = RD; 6674 6675 return getTagDeclType(BlockDescriptorType); 6676 } 6677 6678 QualType ASTContext::getBlockDescriptorExtendedType() const { 6679 if (BlockDescriptorExtendedType) 6680 return getTagDeclType(BlockDescriptorExtendedType); 6681 6682 RecordDecl *RD; 6683 // FIXME: Needs the FlagAppleBlock bit. 
6684 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 6685 RD->startDefinition(); 6686 6687 QualType FieldTypes[] = { 6688 UnsignedLongTy, 6689 UnsignedLongTy, 6690 getPointerType(VoidPtrTy), 6691 getPointerType(VoidPtrTy) 6692 }; 6693 6694 static const char *const FieldNames[] = { 6695 "reserved", 6696 "Size", 6697 "CopyFuncPtr", 6698 "DestroyFuncPtr" 6699 }; 6700 6701 for (size_t i = 0; i < 4; ++i) { 6702 FieldDecl *Field = FieldDecl::Create( 6703 *this, RD, SourceLocation(), SourceLocation(), 6704 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 6705 /*BitWidth=*/nullptr, 6706 /*Mutable=*/false, ICIS_NoInit); 6707 Field->setAccess(AS_public); 6708 RD->addDecl(Field); 6709 } 6710 6711 RD->completeDefinition(); 6712 6713 BlockDescriptorExtendedType = RD; 6714 return getTagDeclType(BlockDescriptorExtendedType); 6715 } 6716 6717 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 6718 const auto *BT = dyn_cast<BuiltinType>(T); 6719 6720 if (!BT) { 6721 if (isa<PipeType>(T)) 6722 return OCLTK_Pipe; 6723 6724 return OCLTK_Default; 6725 } 6726 6727 switch (BT->getKind()) { 6728 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 6729 case BuiltinType::Id: \ 6730 return OCLTK_Image; 6731 #include "clang/Basic/OpenCLImageTypes.def" 6732 6733 case BuiltinType::OCLClkEvent: 6734 return OCLTK_ClkEvent; 6735 6736 case BuiltinType::OCLEvent: 6737 return OCLTK_Event; 6738 6739 case BuiltinType::OCLQueue: 6740 return OCLTK_Queue; 6741 6742 case BuiltinType::OCLReserveID: 6743 return OCLTK_ReserveID; 6744 6745 case BuiltinType::OCLSampler: 6746 return OCLTK_Sampler; 6747 6748 default: 6749 return OCLTK_Default; 6750 } 6751 } 6752 6753 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 6754 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 6755 } 6756 6757 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 6758 /// requires copy/dispose. Note that this must match the logic 6759 /// in buildByrefHelpers. 6760 bool ASTContext::BlockRequiresCopying(QualType Ty, 6761 const VarDecl *D) { 6762 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 6763 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 6764 if (!copyExpr && record->hasTrivialDestructor()) return false; 6765 6766 return true; 6767 } 6768 6769 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 6770 // move or destroy. 6771 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 6772 return true; 6773 6774 if (!Ty->isObjCRetainableType()) return false; 6775 6776 Qualifiers qs = Ty.getQualifiers(); 6777 6778 // If we have lifetime, that dominates. 6779 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 6780 switch (lifetime) { 6781 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 6782 6783 // These are just bits as far as the runtime is concerned. 6784 case Qualifiers::OCL_ExplicitNone: 6785 case Qualifiers::OCL_Autoreleasing: 6786 return false; 6787 6788 // These cases should have been taken care of when checking the type's 6789 // non-triviality. 
6790 case Qualifiers::OCL_Weak: 6791 case Qualifiers::OCL_Strong: 6792 llvm_unreachable("impossible"); 6793 } 6794 llvm_unreachable("fell out of lifetime switch!"); 6795 } 6796 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 6797 Ty->isObjCObjectPointerType()); 6798 } 6799 6800 bool ASTContext::getByrefLifetime(QualType Ty, 6801 Qualifiers::ObjCLifetime &LifeTime, 6802 bool &HasByrefExtendedLayout) const { 6803 if (!getLangOpts().ObjC || 6804 getLangOpts().getGC() != LangOptions::NonGC) 6805 return false; 6806 6807 HasByrefExtendedLayout = false; 6808 if (Ty->isRecordType()) { 6809 HasByrefExtendedLayout = true; 6810 LifeTime = Qualifiers::OCL_None; 6811 } else if ((LifeTime = Ty.getObjCLifetime())) { 6812 // Honor the ARC qualifiers. 6813 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 6814 // The MRR rule. 6815 LifeTime = Qualifiers::OCL_ExplicitNone; 6816 } else { 6817 LifeTime = Qualifiers::OCL_None; 6818 } 6819 return true; 6820 } 6821 6822 CanQualType ASTContext::getNSUIntegerType() const { 6823 assert(Target && "Expected target to be initialized"); 6824 const llvm::Triple &T = Target->getTriple(); 6825 // Windows is LLP64 rather than LP64 6826 if (T.isOSWindows() && T.isArch64Bit()) 6827 return UnsignedLongLongTy; 6828 return UnsignedLongTy; 6829 } 6830 6831 CanQualType ASTContext::getNSIntegerType() const { 6832 assert(Target && "Expected target to be initialized"); 6833 const llvm::Triple &T = Target->getTriple(); 6834 // Windows is LLP64 rather than LP64 6835 if (T.isOSWindows() && T.isArch64Bit()) 6836 return LongLongTy; 6837 return LongTy; 6838 } 6839 6840 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 6841 if (!ObjCInstanceTypeDecl) 6842 ObjCInstanceTypeDecl = 6843 buildImplicitTypedef(getObjCIdType(), "instancetype"); 6844 return ObjCInstanceTypeDecl; 6845 } 6846 6847 // This returns true if a type has been typedefed to BOOL: 6848 // typedef <type> BOOL; 6849 static bool isTypeTypedefedAsBOOL(QualType T) { 6850 if (const auto *TT = dyn_cast<TypedefType>(T)) 6851 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 6852 return II->isStr("BOOL"); 6853 6854 return false; 6855 } 6856 6857 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 6858 /// purpose. 6859 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 6860 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 6861 return CharUnits::Zero(); 6862 6863 CharUnits sz = getTypeSizeInChars(type); 6864 6865 // Make all integer and enum types at least as large as an int 6866 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 6867 sz = std::max(sz, getTypeSizeInChars(IntTy)); 6868 // Treat arrays as pointers, since that's how they're passed in. 6869 else if (type->isArrayType()) 6870 sz = getTypeSizeInChars(VoidPtrTy); 6871 return sz; 6872 } 6873 6874 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 6875 return getTargetInfo().getCXXABI().isMicrosoft() && 6876 VD->isStaticDataMember() && 6877 VD->getType()->isIntegralOrEnumerationType() && 6878 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 6879 } 6880 6881 ASTContext::InlineVariableDefinitionKind 6882 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 6883 if (!VD->isInline()) 6884 return InlineVariableDefinitionKind::None; 6885 6886 // In almost all cases, it's a weak definition. 
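  // For example, a namespace-scope 'inline int x = 1;' is such a weak,
  // discardable definition; only certain redeclarations of constexpr static
  // data members (handled below) are treated as strong.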
6887 auto *First = VD->getFirstDecl(); 6888 if (First->isInlineSpecified() || !First->isStaticDataMember()) 6889 return InlineVariableDefinitionKind::Weak; 6890 6891 // If there's a file-context declaration in this translation unit, it's a 6892 // non-discardable definition. 6893 for (auto *D : VD->redecls()) 6894 if (D->getLexicalDeclContext()->isFileContext() && 6895 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 6896 return InlineVariableDefinitionKind::Strong; 6897 6898 // If we've not seen one yet, we don't know. 6899 return InlineVariableDefinitionKind::WeakUnknown; 6900 } 6901 6902 static std::string charUnitsToString(const CharUnits &CU) { 6903 return llvm::itostr(CU.getQuantity()); 6904 } 6905 6906 /// getObjCEncodingForBlock - Return the encoded type for this block 6907 /// declaration. 6908 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 6909 std::string S; 6910 6911 const BlockDecl *Decl = Expr->getBlockDecl(); 6912 QualType BlockTy = 6913 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 6914 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 6915 // Encode result type. 6916 if (getLangOpts().EncodeExtendedBlockSig) 6917 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 6918 true /*Extended*/); 6919 else 6920 getObjCEncodingForType(BlockReturnTy, S); 6921 // Compute size of all parameters. 6922 // Start with computing size of a pointer in number of bytes. 6923 // FIXME: There might(should) be a better way of doing this computation! 6924 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 6925 CharUnits ParmOffset = PtrSize; 6926 for (auto PI : Decl->parameters()) { 6927 QualType PType = PI->getType(); 6928 CharUnits sz = getObjCEncodingTypeSize(PType); 6929 if (sz.isZero()) 6930 continue; 6931 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 6932 ParmOffset += sz; 6933 } 6934 // Size of the argument frame 6935 S += charUnitsToString(ParmOffset); 6936 // Block pointer and offset. 6937 S += "@?0"; 6938 6939 // Argument types. 6940 ParmOffset = PtrSize; 6941 for (auto PVDecl : Decl->parameters()) { 6942 QualType PType = PVDecl->getOriginalType(); 6943 if (const auto *AT = 6944 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 6945 // Use array's original type only if it has known number of 6946 // elements. 6947 if (!isa<ConstantArrayType>(AT)) 6948 PType = PVDecl->getType(); 6949 } else if (PType->isFunctionType()) 6950 PType = PVDecl->getType(); 6951 if (getLangOpts().EncodeExtendedBlockSig) 6952 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 6953 S, true /*Extended*/); 6954 else 6955 getObjCEncodingForType(PType, S); 6956 S += charUnitsToString(ParmOffset); 6957 ParmOffset += getObjCEncodingTypeSize(PType); 6958 } 6959 6960 return S; 6961 } 6962 6963 std::string 6964 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 6965 std::string S; 6966 // Encode result type. 6967 getObjCEncodingForType(Decl->getReturnType(), S); 6968 CharUnits ParmOffset; 6969 // Compute size of all parameters. 6970 for (auto PI : Decl->parameters()) { 6971 QualType PType = PI->getType(); 6972 CharUnits sz = getObjCEncodingTypeSize(PType); 6973 if (sz.isZero()) 6974 continue; 6975 6976 assert(sz.isPositive() && 6977 "getObjCEncodingForFunctionDecl - Incomplete param type"); 6978 ParmOffset += sz; 6979 } 6980 S += charUnitsToString(ParmOffset); 6981 ParmOffset = CharUnits::Zero(); 6982 6983 // Argument types. 
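  // For example, on an LP64 target a function 'void f(int, char *)' encodes
  // as "v12i0*4": 'v' for the return type, '12' for the total argument frame
  // size, then each parameter's type encoding followed by its byte offset.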
6984 for (auto PVDecl : Decl->parameters()) { 6985 QualType PType = PVDecl->getOriginalType(); 6986 if (const auto *AT = 6987 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 6988 // Use array's original type only if it has known number of 6989 // elements. 6990 if (!isa<ConstantArrayType>(AT)) 6991 PType = PVDecl->getType(); 6992 } else if (PType->isFunctionType()) 6993 PType = PVDecl->getType(); 6994 getObjCEncodingForType(PType, S); 6995 S += charUnitsToString(ParmOffset); 6996 ParmOffset += getObjCEncodingTypeSize(PType); 6997 } 6998 6999 return S; 7000 } 7001 7002 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7003 /// method parameter or return type. If Extended, include class names and 7004 /// block object types. 7005 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7006 QualType T, std::string& S, 7007 bool Extended) const { 7008 // Encode type qualifer, 'in', 'inout', etc. for the parameter. 7009 getObjCEncodingForTypeQualifier(QT, S); 7010 // Encode parameter type. 7011 ObjCEncOptions Options = ObjCEncOptions() 7012 .setExpandPointedToStructures() 7013 .setExpandStructures() 7014 .setIsOutermostType(); 7015 if (Extended) 7016 Options.setEncodeBlockParameters().setEncodeClassNames(); 7017 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7018 } 7019 7020 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7021 /// declaration. 7022 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7023 bool Extended) const { 7024 // FIXME: This is not very efficient. 7025 // Encode return type. 7026 std::string S; 7027 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7028 Decl->getReturnType(), S, Extended); 7029 // Compute size of all parameters. 7030 // Start with computing size of a pointer in number of bytes. 7031 // FIXME: There might(should) be a better way of doing this computation! 7032 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7033 // The first two arguments (self and _cmd) are pointers; account for 7034 // their size. 7035 CharUnits ParmOffset = 2 * PtrSize; 7036 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7037 E = Decl->sel_param_end(); PI != E; ++PI) { 7038 QualType PType = (*PI)->getType(); 7039 CharUnits sz = getObjCEncodingTypeSize(PType); 7040 if (sz.isZero()) 7041 continue; 7042 7043 assert(sz.isPositive() && 7044 "getObjCEncodingForMethodDecl - Incomplete param type"); 7045 ParmOffset += sz; 7046 } 7047 S += charUnitsToString(ParmOffset); 7048 S += "@0:"; 7049 S += charUnitsToString(PtrSize); 7050 7051 // Argument types. 7052 ParmOffset = 2 * PtrSize; 7053 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7054 E = Decl->sel_param_end(); PI != E; ++PI) { 7055 const ParmVarDecl *PVDecl = *PI; 7056 QualType PType = PVDecl->getOriginalType(); 7057 if (const auto *AT = 7058 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7059 // Use array's original type only if it has known number of 7060 // elements. 
7061 if (!isa<ConstantArrayType>(AT)) 7062 PType = PVDecl->getType(); 7063 } else if (PType->isFunctionType()) 7064 PType = PVDecl->getType(); 7065 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7066 PType, S, Extended); 7067 S += charUnitsToString(ParmOffset); 7068 ParmOffset += getObjCEncodingTypeSize(PType); 7069 } 7070 7071 return S; 7072 } 7073 7074 ObjCPropertyImplDecl * 7075 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7076 const ObjCPropertyDecl *PD, 7077 const Decl *Container) const { 7078 if (!Container) 7079 return nullptr; 7080 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7081 for (auto *PID : CID->property_impls()) 7082 if (PID->getPropertyDecl() == PD) 7083 return PID; 7084 } else { 7085 const auto *OID = cast<ObjCImplementationDecl>(Container); 7086 for (auto *PID : OID->property_impls()) 7087 if (PID->getPropertyDecl() == PD) 7088 return PID; 7089 } 7090 return nullptr; 7091 } 7092 7093 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7094 /// property declaration. If non-NULL, Container must be either an 7095 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7096 /// NULL when getting encodings for protocol properties. 7097 /// Property attributes are stored as a comma-delimited C string. The simple 7098 /// attributes readonly and bycopy are encoded as single characters. The 7099 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7100 /// encoded as single characters, followed by an identifier. Property types 7101 /// are also encoded as a parametrized attribute. The characters used to encode 7102 /// these attributes are defined by the following enumeration: 7103 /// @code 7104 /// enum PropertyAttributes { 7105 /// kPropertyReadOnly = 'R', // property is read-only. 7106 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7107 /// kPropertyByref = '&', // property is a reference to the value last assigned 7108 /// kPropertyDynamic = 'D', // property is dynamic 7109 /// kPropertyGetter = 'G', // followed by getter selector name 7110 /// kPropertySetter = 'S', // followed by setter selector name 7111 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7112 /// kPropertyType = 'T' // followed by old-style type encoding. 7113 /// kPropertyWeak = 'W' // 'weak' property 7114 /// kPropertyStrong = 'P' // property GC'able 7115 /// kPropertyNonAtomic = 'N' // property non-atomic 7116 /// }; 7117 /// @endcode 7118 std::string 7119 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7120 const Decl *Container) const { 7121 // Collect information from the property implementation decl(s). 7122 bool Dynamic = false; 7123 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7124 7125 if (ObjCPropertyImplDecl *PropertyImpDecl = 7126 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7127 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7128 Dynamic = true; 7129 else 7130 SynthesizePID = PropertyImpDecl; 7131 } 7132 7133 // FIXME: This is not very efficient. 7134 std::string S = "T"; 7135 7136 // Encode result type. 7137 // GCC has some special rules regarding encoding of properties which 7138 // closely resembles encoding of ivars. 
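  // For example, a property declared '@property (nonatomic, copy) NSString *name;'
  // and synthesized with the ivar '_name' encodes as 'T@"NSString",C,N,V_name'.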
7139 getObjCEncodingForPropertyType(PD->getType(), S); 7140 7141 if (PD->isReadOnly()) { 7142 S += ",R"; 7143 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7144 S += ",C"; 7145 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7146 S += ",&"; 7147 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7148 S += ",W"; 7149 } else { 7150 switch (PD->getSetterKind()) { 7151 case ObjCPropertyDecl::Assign: break; 7152 case ObjCPropertyDecl::Copy: S += ",C"; break; 7153 case ObjCPropertyDecl::Retain: S += ",&"; break; 7154 case ObjCPropertyDecl::Weak: S += ",W"; break; 7155 } 7156 } 7157 7158 // It really isn't clear at all what this means, since properties 7159 // are "dynamic by default". 7160 if (Dynamic) 7161 S += ",D"; 7162 7163 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7164 S += ",N"; 7165 7166 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7167 S += ",G"; 7168 S += PD->getGetterName().getAsString(); 7169 } 7170 7171 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7172 S += ",S"; 7173 S += PD->getSetterName().getAsString(); 7174 } 7175 7176 if (SynthesizePID) { 7177 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7178 S += ",V"; 7179 S += OID->getNameAsString(); 7180 } 7181 7182 // FIXME: OBJCGC: weak & strong 7183 return S; 7184 } 7185 7186 /// getLegacyIntegralTypeEncoding - 7187 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7188 /// 'l' or 'L' , but not always. For typedefs, we need to use 7189 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7190 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7191 if (isa<TypedefType>(PointeeTy.getTypePtr())) { 7192 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7193 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7194 PointeeTy = UnsignedIntTy; 7195 else 7196 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7197 PointeeTy = IntTy; 7198 } 7199 } 7200 } 7201 7202 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7203 const FieldDecl *Field, 7204 QualType *NotEncodedT) const { 7205 // We follow the behavior of gcc, expanding structures which are 7206 // directly pointed to, and expanding embedded structures. Note that 7207 // these rules are sufficient to prevent recursive encoding of the 7208 // same type. 7209 getObjCEncodingForTypeImpl(T, S, 7210 ObjCEncOptions() 7211 .setExpandPointedToStructures() 7212 .setExpandStructures() 7213 .setIsOutermostType(), 7214 Field, NotEncodedT); 7215 } 7216 7217 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7218 std::string& S) const { 7219 // Encode result type. 7220 // GCC has some special rules regarding encoding of properties which 7221 // closely resembles encoding of ivars. 
7222 getObjCEncodingForTypeImpl(T, S, 7223 ObjCEncOptions() 7224 .setExpandPointedToStructures() 7225 .setExpandStructures() 7226 .setIsOutermostType() 7227 .setEncodingProperty(), 7228 /*Field=*/nullptr); 7229 } 7230 7231 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7232 const BuiltinType *BT) { 7233 BuiltinType::Kind kind = BT->getKind(); 7234 switch (kind) { 7235 case BuiltinType::Void: return 'v'; 7236 case BuiltinType::Bool: return 'B'; 7237 case BuiltinType::Char8: 7238 case BuiltinType::Char_U: 7239 case BuiltinType::UChar: return 'C'; 7240 case BuiltinType::Char16: 7241 case BuiltinType::UShort: return 'S'; 7242 case BuiltinType::Char32: 7243 case BuiltinType::UInt: return 'I'; 7244 case BuiltinType::ULong: 7245 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7246 case BuiltinType::UInt128: return 'T'; 7247 case BuiltinType::ULongLong: return 'Q'; 7248 case BuiltinType::Char_S: 7249 case BuiltinType::SChar: return 'c'; 7250 case BuiltinType::Short: return 's'; 7251 case BuiltinType::WChar_S: 7252 case BuiltinType::WChar_U: 7253 case BuiltinType::Int: return 'i'; 7254 case BuiltinType::Long: 7255 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7256 case BuiltinType::LongLong: return 'q'; 7257 case BuiltinType::Int128: return 't'; 7258 case BuiltinType::Float: return 'f'; 7259 case BuiltinType::Double: return 'd'; 7260 case BuiltinType::LongDouble: return 'D'; 7261 case BuiltinType::NullPtr: return '*'; // like char* 7262 7263 case BuiltinType::BFloat16: 7264 case BuiltinType::Float16: 7265 case BuiltinType::Float128: 7266 case BuiltinType::Half: 7267 case BuiltinType::ShortAccum: 7268 case BuiltinType::Accum: 7269 case BuiltinType::LongAccum: 7270 case BuiltinType::UShortAccum: 7271 case BuiltinType::UAccum: 7272 case BuiltinType::ULongAccum: 7273 case BuiltinType::ShortFract: 7274 case BuiltinType::Fract: 7275 case BuiltinType::LongFract: 7276 case BuiltinType::UShortFract: 7277 case BuiltinType::UFract: 7278 case BuiltinType::ULongFract: 7279 case BuiltinType::SatShortAccum: 7280 case BuiltinType::SatAccum: 7281 case BuiltinType::SatLongAccum: 7282 case BuiltinType::SatUShortAccum: 7283 case BuiltinType::SatUAccum: 7284 case BuiltinType::SatULongAccum: 7285 case BuiltinType::SatShortFract: 7286 case BuiltinType::SatFract: 7287 case BuiltinType::SatLongFract: 7288 case BuiltinType::SatUShortFract: 7289 case BuiltinType::SatUFract: 7290 case BuiltinType::SatULongFract: 7291 // FIXME: potentially need @encodes for these! 7292 return ' '; 7293 7294 #define SVE_TYPE(Name, Id, SingletonId) \ 7295 case BuiltinType::Id: 7296 #include "clang/Basic/AArch64SVEACLETypes.def" 7297 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 7298 #include "clang/Basic/RISCVVTypes.def" 7299 { 7300 DiagnosticsEngine &Diags = C->getDiagnostics(); 7301 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 7302 "cannot yet @encode type %0"); 7303 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 7304 return ' '; 7305 } 7306 7307 case BuiltinType::ObjCId: 7308 case BuiltinType::ObjCClass: 7309 case BuiltinType::ObjCSel: 7310 llvm_unreachable("@encoding ObjC primitive type"); 7311 7312 // OpenCL and placeholder types don't need @encodings. 
7313 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7314 case BuiltinType::Id: 7315 #include "clang/Basic/OpenCLImageTypes.def" 7316 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 7317 case BuiltinType::Id: 7318 #include "clang/Basic/OpenCLExtensionTypes.def" 7319 case BuiltinType::OCLEvent: 7320 case BuiltinType::OCLClkEvent: 7321 case BuiltinType::OCLQueue: 7322 case BuiltinType::OCLReserveID: 7323 case BuiltinType::OCLSampler: 7324 case BuiltinType::Dependent: 7325 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 7326 case BuiltinType::Id: 7327 #include "clang/Basic/PPCTypes.def" 7328 #define BUILTIN_TYPE(KIND, ID) 7329 #define PLACEHOLDER_TYPE(KIND, ID) \ 7330 case BuiltinType::KIND: 7331 #include "clang/AST/BuiltinTypes.def" 7332 llvm_unreachable("invalid builtin type for @encode"); 7333 } 7334 llvm_unreachable("invalid BuiltinType::Kind value"); 7335 } 7336 7337 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 7338 EnumDecl *Enum = ET->getDecl(); 7339 7340 // The encoding of an non-fixed enum type is always 'i', regardless of size. 7341 if (!Enum->isFixed()) 7342 return 'i'; 7343 7344 // The encoding of a fixed enum type matches its fixed underlying type. 7345 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 7346 return getObjCEncodingForPrimitiveType(C, BT); 7347 } 7348 7349 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 7350 QualType T, const FieldDecl *FD) { 7351 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 7352 S += 'b'; 7353 // The NeXT runtime encodes bit fields as b followed by the number of bits. 7354 // The GNU runtime requires more information; bitfields are encoded as b, 7355 // then the offset (in bits) of the first element, then the type of the 7356 // bitfield, then the size in bits. For example, in this structure: 7357 // 7358 // struct 7359 // { 7360 // int integer; 7361 // int flags:2; 7362 // }; 7363 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 7364 // runtime, but b32i2 for the GNU runtime. The reason for this extra 7365 // information is not especially sensible, but we're stuck with it for 7366 // compatibility with GCC, although providing it breaks anything that 7367 // actually uses runtime introspection and wants to work on both runtimes... 7368 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 7369 uint64_t Offset; 7370 7371 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 7372 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 7373 IVD); 7374 } else { 7375 const RecordDecl *RD = FD->getParent(); 7376 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 7377 Offset = RL.getFieldOffset(FD->getFieldIndex()); 7378 } 7379 7380 S += llvm::utostr(Offset); 7381 7382 if (const auto *ET = T->getAs<EnumType>()) 7383 S += ObjCEncodingForEnumType(Ctx, ET); 7384 else { 7385 const auto *BT = T->castAs<BuiltinType>(); 7386 S += getObjCEncodingForPrimitiveType(Ctx, BT); 7387 } 7388 } 7389 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 7390 } 7391 7392 // Helper function for determining whether the encoded type string would include 7393 // a template specialization type. 
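// For example, it returns true for a class template specialization such as
// 'std::vector<int>' and for a pointer to one; the caller in
// getObjCEncodingForTypeImpl then encodes such a pointer as "^v" unless
// LangOpts.EncodeCXXClassTemplateSpec is enabled.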
7394 static bool hasTemplateSpecializationInEncodedString(const Type *T, 7395 bool VisitBasesAndFields) { 7396 T = T->getBaseElementTypeUnsafe(); 7397 7398 if (auto *PT = T->getAs<PointerType>()) 7399 return hasTemplateSpecializationInEncodedString( 7400 PT->getPointeeType().getTypePtr(), false); 7401 7402 auto *CXXRD = T->getAsCXXRecordDecl(); 7403 7404 if (!CXXRD) 7405 return false; 7406 7407 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 7408 return true; 7409 7410 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 7411 return false; 7412 7413 for (auto B : CXXRD->bases()) 7414 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 7415 true)) 7416 return true; 7417 7418 for (auto *FD : CXXRD->fields()) 7419 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 7420 true)) 7421 return true; 7422 7423 return false; 7424 } 7425 7426 // FIXME: Use SmallString for accumulating string. 7427 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 7428 const ObjCEncOptions Options, 7429 const FieldDecl *FD, 7430 QualType *NotEncodedT) const { 7431 CanQualType CT = getCanonicalType(T); 7432 switch (CT->getTypeClass()) { 7433 case Type::Builtin: 7434 case Type::Enum: 7435 if (FD && FD->isBitField()) 7436 return EncodeBitField(this, S, T, FD); 7437 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 7438 S += getObjCEncodingForPrimitiveType(this, BT); 7439 else 7440 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 7441 return; 7442 7443 case Type::Complex: 7444 S += 'j'; 7445 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 7446 ObjCEncOptions(), 7447 /*Field=*/nullptr); 7448 return; 7449 7450 case Type::Atomic: 7451 S += 'A'; 7452 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 7453 ObjCEncOptions(), 7454 /*Field=*/nullptr); 7455 return; 7456 7457 // encoding for pointer or reference types. 7458 case Type::Pointer: 7459 case Type::LValueReference: 7460 case Type::RValueReference: { 7461 QualType PointeeTy; 7462 if (isa<PointerType>(CT)) { 7463 const auto *PT = T->castAs<PointerType>(); 7464 if (PT->isObjCSelType()) { 7465 S += ':'; 7466 return; 7467 } 7468 PointeeTy = PT->getPointeeType(); 7469 } else { 7470 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 7471 } 7472 7473 bool isReadOnly = false; 7474 // For historical/compatibility reasons, the read-only qualifier of the 7475 // pointee gets emitted _before_ the '^'. The read-only qualifier of 7476 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 7477 // Also, do not emit the 'r' for anything but the outermost type! 7478 if (isa<TypedefType>(T.getTypePtr())) { 7479 if (Options.IsOutermostType() && T.isConstQualified()) { 7480 isReadOnly = true; 7481 S += 'r'; 7482 } 7483 } else if (Options.IsOutermostType()) { 7484 QualType P = PointeeTy; 7485 while (auto PT = P->getAs<PointerType>()) 7486 P = PT->getPointeeType(); 7487 if (P.isConstQualified()) { 7488 isReadOnly = true; 7489 S += 'r'; 7490 } 7491 } 7492 if (isReadOnly) { 7493 // Another legacy compatibility encoding. Some ObjC qualifier and type 7494 // combinations need to be rearranged. 7495 // Rewrite "in const" from "nr" to "rn" 7496 if (StringRef(S).endswith("nr")) 7497 S.replace(S.end()-2, S.end(), "rn"); 7498 } 7499 7500 if (PointeeTy->isCharType()) { 7501 // char pointer types should be encoded as '*' unless it is a 7502 // type that has been typedef'd to 'BOOL'. 
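      // On platforms where BOOL is a typedef of 'signed char', a 'BOOL *'
      // therefore falls through below and is encoded as "^c" rather than "*".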
7503 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 7504 S += '*'; 7505 return; 7506 } 7507 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 7508 // GCC binary compat: Need to convert "struct objc_class *" to "#". 7509 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 7510 S += '#'; 7511 return; 7512 } 7513 // GCC binary compat: Need to convert "struct objc_object *" to "@". 7514 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 7515 S += '@'; 7516 return; 7517 } 7518 // If the encoded string for the class includes template names, just emit 7519 // "^v" for pointers to the class. 7520 if (getLangOpts().CPlusPlus && 7521 (!getLangOpts().EncodeCXXClassTemplateSpec && 7522 hasTemplateSpecializationInEncodedString( 7523 RTy, Options.ExpandPointedToStructures()))) { 7524 S += "^v"; 7525 return; 7526 } 7527 // fall through... 7528 } 7529 S += '^'; 7530 getLegacyIntegralTypeEncoding(PointeeTy); 7531 7532 ObjCEncOptions NewOptions; 7533 if (Options.ExpandPointedToStructures()) 7534 NewOptions.setExpandStructures(); 7535 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 7536 /*Field=*/nullptr, NotEncodedT); 7537 return; 7538 } 7539 7540 case Type::ConstantArray: 7541 case Type::IncompleteArray: 7542 case Type::VariableArray: { 7543 const auto *AT = cast<ArrayType>(CT); 7544 7545 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 7546 // Incomplete arrays are encoded as a pointer to the array element. 7547 S += '^'; 7548 7549 getObjCEncodingForTypeImpl( 7550 AT->getElementType(), S, 7551 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 7552 } else { 7553 S += '['; 7554 7555 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 7556 S += llvm::utostr(CAT->getSize().getZExtValue()); 7557 else { 7558 //Variable length arrays are encoded as a regular array with 0 elements. 7559 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 7560 "Unknown array type!"); 7561 S += '0'; 7562 } 7563 7564 getObjCEncodingForTypeImpl( 7565 AT->getElementType(), S, 7566 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 7567 NotEncodedT); 7568 S += ']'; 7569 } 7570 return; 7571 } 7572 7573 case Type::FunctionNoProto: 7574 case Type::FunctionProto: 7575 S += '?'; 7576 return; 7577 7578 case Type::Record: { 7579 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 7580 S += RDecl->isUnion() ? '(' : '{'; 7581 // Anonymous structures print as '?' 7582 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 7583 S += II->getName(); 7584 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 7585 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 7586 llvm::raw_string_ostream OS(S); 7587 printTemplateArgumentList(OS, TemplateArgs.asArray(), 7588 getPrintingPolicy()); 7589 } 7590 } else { 7591 S += '?'; 7592 } 7593 if (Options.ExpandStructures()) { 7594 S += '='; 7595 if (!RDecl->isUnion()) { 7596 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 7597 } else { 7598 for (const auto *Field : RDecl->fields()) { 7599 if (FD) { 7600 S += '"'; 7601 S += Field->getNameAsString(); 7602 S += '"'; 7603 } 7604 7605 // Special case bit-fields. 
7606 if (Field->isBitField()) { 7607 getObjCEncodingForTypeImpl(Field->getType(), S, 7608 ObjCEncOptions().setExpandStructures(), 7609 Field); 7610 } else { 7611 QualType qt = Field->getType(); 7612 getLegacyIntegralTypeEncoding(qt); 7613 getObjCEncodingForTypeImpl( 7614 qt, S, 7615 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 7616 NotEncodedT); 7617 } 7618 } 7619 } 7620 } 7621 S += RDecl->isUnion() ? ')' : '}'; 7622 return; 7623 } 7624 7625 case Type::BlockPointer: { 7626 const auto *BT = T->castAs<BlockPointerType>(); 7627 S += "@?"; // Unlike a pointer-to-function, which is "^?". 7628 if (Options.EncodeBlockParameters()) { 7629 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 7630 7631 S += '<'; 7632 // Block return type 7633 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 7634 Options.forComponentType(), FD, NotEncodedT); 7635 // Block self 7636 S += "@?"; 7637 // Block parameters 7638 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 7639 for (const auto &I : FPT->param_types()) 7640 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 7641 NotEncodedT); 7642 } 7643 S += '>'; 7644 } 7645 return; 7646 } 7647 7648 case Type::ObjCObject: { 7649 // hack to match legacy encoding of *id and *Class 7650 QualType Ty = getObjCObjectPointerType(CT); 7651 if (Ty->isObjCIdType()) { 7652 S += "{objc_object=}"; 7653 return; 7654 } 7655 else if (Ty->isObjCClassType()) { 7656 S += "{objc_class=}"; 7657 return; 7658 } 7659 // TODO: Double check to make sure this intentionally falls through. 7660 LLVM_FALLTHROUGH; 7661 } 7662 7663 case Type::ObjCInterface: { 7664 // Ignore protocol qualifiers when mangling at this level. 7665 // @encode(class_name) 7666 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 7667 S += '{'; 7668 S += OI->getObjCRuntimeNameAsString(); 7669 if (Options.ExpandStructures()) { 7670 S += '='; 7671 SmallVector<const ObjCIvarDecl*, 32> Ivars; 7672 DeepCollectObjCIvars(OI, true, Ivars); 7673 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 7674 const FieldDecl *Field = Ivars[i]; 7675 if (Field->isBitField()) 7676 getObjCEncodingForTypeImpl(Field->getType(), S, 7677 ObjCEncOptions().setExpandStructures(), 7678 Field); 7679 else 7680 getObjCEncodingForTypeImpl(Field->getType(), S, 7681 ObjCEncOptions().setExpandStructures(), FD, 7682 NotEncodedT); 7683 } 7684 } 7685 S += '}'; 7686 return; 7687 } 7688 7689 case Type::ObjCObjectPointer: { 7690 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 7691 if (OPT->isObjCIdType()) { 7692 S += '@'; 7693 return; 7694 } 7695 7696 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 7697 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 7698 // Since this is a binary compatibility issue, need to consult with 7699 // runtime folks. Fortunately, this is a *very* obscure construct. 7700 S += '#'; 7701 return; 7702 } 7703 7704 if (OPT->isObjCQualifiedIdType()) { 7705 getObjCEncodingForTypeImpl( 7706 getObjCIdType(), S, 7707 Options.keepingOnly(ObjCEncOptions() 7708 .setExpandPointedToStructures() 7709 .setExpandStructures()), 7710 FD); 7711 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 7712 // Note that we do extended encoding of protocol qualifer list 7713 // Only when doing ivar or property encoding. 
7714 S += '"'; 7715 for (const auto *I : OPT->quals()) { 7716 S += '<'; 7717 S += I->getObjCRuntimeNameAsString(); 7718 S += '>'; 7719 } 7720 S += '"'; 7721 } 7722 return; 7723 } 7724 7725 S += '@'; 7726 if (OPT->getInterfaceDecl() && 7727 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 7728 S += '"'; 7729 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 7730 for (const auto *I : OPT->quals()) { 7731 S += '<'; 7732 S += I->getObjCRuntimeNameAsString(); 7733 S += '>'; 7734 } 7735 S += '"'; 7736 } 7737 return; 7738 } 7739 7740 // gcc just blithely ignores member pointers. 7741 // FIXME: we should do better than that. 'M' is available. 7742 case Type::MemberPointer: 7743 // This matches gcc's encoding, even though technically it is insufficient. 7744 //FIXME. We should do a better job than gcc. 7745 case Type::Vector: 7746 case Type::ExtVector: 7747 // Until we have a coherent encoding of these three types, issue warning. 7748 if (NotEncodedT) 7749 *NotEncodedT = T; 7750 return; 7751 7752 case Type::ConstantMatrix: 7753 if (NotEncodedT) 7754 *NotEncodedT = T; 7755 return; 7756 7757 // We could see an undeduced auto type here during error recovery. 7758 // Just ignore it. 7759 case Type::Auto: 7760 case Type::DeducedTemplateSpecialization: 7761 return; 7762 7763 case Type::Pipe: 7764 case Type::ExtInt: 7765 #define ABSTRACT_TYPE(KIND, BASE) 7766 #define TYPE(KIND, BASE) 7767 #define DEPENDENT_TYPE(KIND, BASE) \ 7768 case Type::KIND: 7769 #define NON_CANONICAL_TYPE(KIND, BASE) \ 7770 case Type::KIND: 7771 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 7772 case Type::KIND: 7773 #include "clang/AST/TypeNodes.inc" 7774 llvm_unreachable("@encode for dependent type!"); 7775 } 7776 llvm_unreachable("bad type kind!"); 7777 } 7778 7779 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 7780 std::string &S, 7781 const FieldDecl *FD, 7782 bool includeVBases, 7783 QualType *NotEncodedT) const { 7784 assert(RDecl && "Expected non-null RecordDecl"); 7785 assert(!RDecl->isUnion() && "Should not be called for unions"); 7786 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 7787 return; 7788 7789 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 7790 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 7791 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 7792 7793 if (CXXRec) { 7794 for (const auto &BI : CXXRec->bases()) { 7795 if (!BI.isVirtual()) { 7796 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 7797 if (base->isEmpty()) 7798 continue; 7799 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 7800 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 7801 std::make_pair(offs, base)); 7802 } 7803 } 7804 } 7805 7806 unsigned i = 0; 7807 for (FieldDecl *Field : RDecl->fields()) { 7808 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 7809 continue; 7810 uint64_t offs = layout.getFieldOffset(i); 7811 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 7812 std::make_pair(offs, Field)); 7813 ++i; 7814 } 7815 7816 if (CXXRec && includeVBases) { 7817 for (const auto &BI : CXXRec->vbases()) { 7818 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 7819 if (base->isEmpty()) 7820 continue; 7821 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 7822 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 7823 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 7824 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 7825 
                                std::make_pair(offs, base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(size);
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different than normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would then
      // be longer, though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
7889 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 7890 NotEncodedT); 7891 assert(!base->isEmpty()); 7892 #ifndef NDEBUG 7893 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 7894 #endif 7895 } else { 7896 const auto *field = cast<FieldDecl>(dcl); 7897 if (FD) { 7898 S += '"'; 7899 S += field->getNameAsString(); 7900 S += '"'; 7901 } 7902 7903 if (field->isBitField()) { 7904 EncodeBitField(this, S, field->getType(), field); 7905 #ifndef NDEBUG 7906 CurOffs += field->getBitWidthValue(*this); 7907 #endif 7908 } else { 7909 QualType qt = field->getType(); 7910 getLegacyIntegralTypeEncoding(qt); 7911 getObjCEncodingForTypeImpl( 7912 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 7913 FD, NotEncodedT); 7914 #ifndef NDEBUG 7915 CurOffs += getTypeSize(field->getType()); 7916 #endif 7917 } 7918 } 7919 } 7920 } 7921 7922 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 7923 std::string& S) const { 7924 if (QT & Decl::OBJC_TQ_In) 7925 S += 'n'; 7926 if (QT & Decl::OBJC_TQ_Inout) 7927 S += 'N'; 7928 if (QT & Decl::OBJC_TQ_Out) 7929 S += 'o'; 7930 if (QT & Decl::OBJC_TQ_Bycopy) 7931 S += 'O'; 7932 if (QT & Decl::OBJC_TQ_Byref) 7933 S += 'R'; 7934 if (QT & Decl::OBJC_TQ_Oneway) 7935 S += 'V'; 7936 } 7937 7938 TypedefDecl *ASTContext::getObjCIdDecl() const { 7939 if (!ObjCIdDecl) { 7940 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 7941 T = getObjCObjectPointerType(T); 7942 ObjCIdDecl = buildImplicitTypedef(T, "id"); 7943 } 7944 return ObjCIdDecl; 7945 } 7946 7947 TypedefDecl *ASTContext::getObjCSelDecl() const { 7948 if (!ObjCSelDecl) { 7949 QualType T = getPointerType(ObjCBuiltinSelTy); 7950 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 7951 } 7952 return ObjCSelDecl; 7953 } 7954 7955 TypedefDecl *ASTContext::getObjCClassDecl() const { 7956 if (!ObjCClassDecl) { 7957 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 7958 T = getObjCObjectPointerType(T); 7959 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 7960 } 7961 return ObjCClassDecl; 7962 } 7963 7964 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 7965 if (!ObjCProtocolClassDecl) { 7966 ObjCProtocolClassDecl 7967 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 7968 SourceLocation(), 7969 &Idents.get("Protocol"), 7970 /*typeParamList=*/nullptr, 7971 /*PrevDecl=*/nullptr, 7972 SourceLocation(), true); 7973 } 7974 7975 return ObjCProtocolClassDecl; 7976 } 7977 7978 //===----------------------------------------------------------------------===// 7979 // __builtin_va_list Construction Functions 7980 //===----------------------------------------------------------------------===// 7981 7982 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 7983 StringRef Name) { 7984 // typedef char* __builtin[_ms]_va_list; 7985 QualType T = Context->getPointerType(Context->CharTy); 7986 return Context->buildImplicitTypedef(T, Name); 7987 } 7988 7989 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 7990 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 7991 } 7992 7993 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 7994 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 7995 } 7996 7997 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 7998 // typedef void* __builtin_va_list; 7999 QualType T = Context->getPointerType(Context->VoidTy); 8000 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8001 } 8002 8003 static TypedefDecl * 8004 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8005 // struct __va_list 8006 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8007 if (Context->getLangOpts().CPlusPlus) { 8008 // namespace std { struct __va_list { 8009 NamespaceDecl *NS; 8010 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8011 Context->getTranslationUnitDecl(), 8012 /*Inline*/ false, SourceLocation(), 8013 SourceLocation(), &Context->Idents.get("std"), 8014 /*PrevDecl*/ nullptr); 8015 NS->setImplicit(); 8016 VaListTagDecl->setDeclContext(NS); 8017 } 8018 8019 VaListTagDecl->startDefinition(); 8020 8021 const size_t NumFields = 5; 8022 QualType FieldTypes[NumFields]; 8023 const char *FieldNames[NumFields]; 8024 8025 // void *__stack; 8026 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8027 FieldNames[0] = "__stack"; 8028 8029 // void *__gr_top; 8030 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8031 FieldNames[1] = "__gr_top"; 8032 8033 // void *__vr_top; 8034 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8035 FieldNames[2] = "__vr_top"; 8036 8037 // int __gr_offs; 8038 FieldTypes[3] = Context->IntTy; 8039 FieldNames[3] = "__gr_offs"; 8040 8041 // int __vr_offs; 8042 FieldTypes[4] = Context->IntTy; 8043 FieldNames[4] = "__vr_offs"; 8044 8045 // Create fields 8046 for (unsigned i = 0; i < NumFields; ++i) { 8047 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8048 VaListTagDecl, 8049 SourceLocation(), 8050 SourceLocation(), 8051 &Context->Idents.get(FieldNames[i]), 8052 FieldTypes[i], /*TInfo=*/nullptr, 8053 /*BitWidth=*/nullptr, 8054 /*Mutable=*/false, 8055 ICIS_NoInit); 8056 Field->setAccess(AS_public); 8057 VaListTagDecl->addDecl(Field); 8058 } 8059 VaListTagDecl->completeDefinition(); 8060 Context->VaListTagDecl = VaListTagDecl; 8061 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8062 8063 // } __builtin_va_list; 8064 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8065 } 8066 8067 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8068 // typedef struct __va_list_tag { 8069 RecordDecl *VaListTagDecl; 8070 8071 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8072 VaListTagDecl->startDefinition(); 8073 8074 const size_t NumFields = 5; 8075 QualType FieldTypes[NumFields]; 8076 const char *FieldNames[NumFields]; 8077 8078 // unsigned char gpr; 8079 FieldTypes[0] = Context->UnsignedCharTy; 8080 FieldNames[0] = "gpr"; 8081 8082 // unsigned char fpr; 8083 FieldTypes[1] = Context->UnsignedCharTy; 8084 FieldNames[1] = "fpr"; 8085 8086 // unsigned short reserved; 8087 FieldTypes[2] = Context->UnsignedShortTy; 8088 FieldNames[2] = "reserved"; 8089 8090 // void* overflow_arg_area; 8091 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8092 FieldNames[3] = "overflow_arg_area"; 8093 8094 // void* reg_save_area; 8095 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8096 FieldNames[4] = "reg_save_area"; 8097 8098 // Create fields 8099 for (unsigned i = 0; i < NumFields; ++i) { 8100 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8101 SourceLocation(), 8102 SourceLocation(), 8103 &Context->Idents.get(FieldNames[i]), 8104 FieldTypes[i], /*TInfo=*/nullptr, 8105 /*BitWidth=*/nullptr, 8106 /*Mutable=*/false, 8107 ICIS_NoInit); 8108 Field->setAccess(AS_public); 8109 VaListTagDecl->addDecl(Field); 8110 } 8111 VaListTagDecl->completeDefinition(); 8112 
Context->VaListTagDecl = VaListTagDecl; 8113 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8114 8115 // } __va_list_tag; 8116 TypedefDecl *VaListTagTypedefDecl = 8117 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8118 8119 QualType VaListTagTypedefType = 8120 Context->getTypedefType(VaListTagTypedefDecl); 8121 8122 // typedef __va_list_tag __builtin_va_list[1]; 8123 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8124 QualType VaListTagArrayType 8125 = Context->getConstantArrayType(VaListTagTypedefType, 8126 Size, nullptr, ArrayType::Normal, 0); 8127 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8128 } 8129 8130 static TypedefDecl * 8131 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8132 // struct __va_list_tag { 8133 RecordDecl *VaListTagDecl; 8134 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8135 VaListTagDecl->startDefinition(); 8136 8137 const size_t NumFields = 4; 8138 QualType FieldTypes[NumFields]; 8139 const char *FieldNames[NumFields]; 8140 8141 // unsigned gp_offset; 8142 FieldTypes[0] = Context->UnsignedIntTy; 8143 FieldNames[0] = "gp_offset"; 8144 8145 // unsigned fp_offset; 8146 FieldTypes[1] = Context->UnsignedIntTy; 8147 FieldNames[1] = "fp_offset"; 8148 8149 // void* overflow_arg_area; 8150 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8151 FieldNames[2] = "overflow_arg_area"; 8152 8153 // void* reg_save_area; 8154 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8155 FieldNames[3] = "reg_save_area"; 8156 8157 // Create fields 8158 for (unsigned i = 0; i < NumFields; ++i) { 8159 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8160 VaListTagDecl, 8161 SourceLocation(), 8162 SourceLocation(), 8163 &Context->Idents.get(FieldNames[i]), 8164 FieldTypes[i], /*TInfo=*/nullptr, 8165 /*BitWidth=*/nullptr, 8166 /*Mutable=*/false, 8167 ICIS_NoInit); 8168 Field->setAccess(AS_public); 8169 VaListTagDecl->addDecl(Field); 8170 } 8171 VaListTagDecl->completeDefinition(); 8172 Context->VaListTagDecl = VaListTagDecl; 8173 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8174 8175 // }; 8176 8177 // typedef struct __va_list_tag __builtin_va_list[1]; 8178 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8179 QualType VaListTagArrayType = Context->getConstantArrayType( 8180 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8181 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8182 } 8183 8184 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8185 // typedef int __builtin_va_list[4]; 8186 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8187 QualType IntArrayType = Context->getConstantArrayType( 8188 Context->IntTy, Size, nullptr, ArrayType::Normal, 0); 8189 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8190 } 8191 8192 static TypedefDecl * 8193 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8194 // struct __va_list 8195 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8196 if (Context->getLangOpts().CPlusPlus) { 8197 // namespace std { struct __va_list { 8198 NamespaceDecl *NS; 8199 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8200 Context->getTranslationUnitDecl(), 8201 /*Inline*/false, SourceLocation(), 8202 SourceLocation(), &Context->Idents.get("std"), 8203 /*PrevDecl*/ nullptr); 8204 NS->setImplicit(); 8205 
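    // (Placing the tag in namespace std gives it the name std::__va_list,
    // which is the spelling the ARM C++ ABI expects for va_list; the decl
    // context is switched to that namespace just below.)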
VaListDecl->setDeclContext(NS); 8206 } 8207 8208 VaListDecl->startDefinition(); 8209 8210 // void * __ap; 8211 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8212 VaListDecl, 8213 SourceLocation(), 8214 SourceLocation(), 8215 &Context->Idents.get("__ap"), 8216 Context->getPointerType(Context->VoidTy), 8217 /*TInfo=*/nullptr, 8218 /*BitWidth=*/nullptr, 8219 /*Mutable=*/false, 8220 ICIS_NoInit); 8221 Field->setAccess(AS_public); 8222 VaListDecl->addDecl(Field); 8223 8224 // }; 8225 VaListDecl->completeDefinition(); 8226 Context->VaListTagDecl = VaListDecl; 8227 8228 // typedef struct __va_list __builtin_va_list; 8229 QualType T = Context->getRecordType(VaListDecl); 8230 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8231 } 8232 8233 static TypedefDecl * 8234 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8235 // struct __va_list_tag { 8236 RecordDecl *VaListTagDecl; 8237 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8238 VaListTagDecl->startDefinition(); 8239 8240 const size_t NumFields = 4; 8241 QualType FieldTypes[NumFields]; 8242 const char *FieldNames[NumFields]; 8243 8244 // long __gpr; 8245 FieldTypes[0] = Context->LongTy; 8246 FieldNames[0] = "__gpr"; 8247 8248 // long __fpr; 8249 FieldTypes[1] = Context->LongTy; 8250 FieldNames[1] = "__fpr"; 8251 8252 // void *__overflow_arg_area; 8253 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8254 FieldNames[2] = "__overflow_arg_area"; 8255 8256 // void *__reg_save_area; 8257 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8258 FieldNames[3] = "__reg_save_area"; 8259 8260 // Create fields 8261 for (unsigned i = 0; i < NumFields; ++i) { 8262 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8263 VaListTagDecl, 8264 SourceLocation(), 8265 SourceLocation(), 8266 &Context->Idents.get(FieldNames[i]), 8267 FieldTypes[i], /*TInfo=*/nullptr, 8268 /*BitWidth=*/nullptr, 8269 /*Mutable=*/false, 8270 ICIS_NoInit); 8271 Field->setAccess(AS_public); 8272 VaListTagDecl->addDecl(Field); 8273 } 8274 VaListTagDecl->completeDefinition(); 8275 Context->VaListTagDecl = VaListTagDecl; 8276 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8277 8278 // }; 8279 8280 // typedef __va_list_tag __builtin_va_list[1]; 8281 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8282 QualType VaListTagArrayType = Context->getConstantArrayType( 8283 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8284 8285 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8286 } 8287 8288 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 8289 // typedef struct __va_list_tag { 8290 RecordDecl *VaListTagDecl; 8291 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8292 VaListTagDecl->startDefinition(); 8293 8294 const size_t NumFields = 3; 8295 QualType FieldTypes[NumFields]; 8296 const char *FieldNames[NumFields]; 8297 8298 // void *CurrentSavedRegisterArea; 8299 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8300 FieldNames[0] = "__current_saved_reg_area_pointer"; 8301 8302 // void *SavedRegAreaEnd; 8303 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8304 FieldNames[1] = "__saved_reg_area_end_pointer"; 8305 8306 // void *OverflowArea; 8307 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8308 FieldNames[2] = "__overflow_area_pointer"; 8309 8310 // Create fields 8311 for (unsigned i = 0; i < NumFields; ++i) { 8312 FieldDecl *Field = FieldDecl::Create( 
8313 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 8314 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 8315 /*TInfo=*/0, 8316 /*BitWidth=*/0, 8317 /*Mutable=*/false, ICIS_NoInit); 8318 Field->setAccess(AS_public); 8319 VaListTagDecl->addDecl(Field); 8320 } 8321 VaListTagDecl->completeDefinition(); 8322 Context->VaListTagDecl = VaListTagDecl; 8323 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8324 8325 // } __va_list_tag; 8326 TypedefDecl *VaListTagTypedefDecl = 8327 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8328 8329 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 8330 8331 // typedef __va_list_tag __builtin_va_list[1]; 8332 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8333 QualType VaListTagArrayType = Context->getConstantArrayType( 8334 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); 8335 8336 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8337 } 8338 8339 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 8340 TargetInfo::BuiltinVaListKind Kind) { 8341 switch (Kind) { 8342 case TargetInfo::CharPtrBuiltinVaList: 8343 return CreateCharPtrBuiltinVaListDecl(Context); 8344 case TargetInfo::VoidPtrBuiltinVaList: 8345 return CreateVoidPtrBuiltinVaListDecl(Context); 8346 case TargetInfo::AArch64ABIBuiltinVaList: 8347 return CreateAArch64ABIBuiltinVaListDecl(Context); 8348 case TargetInfo::PowerABIBuiltinVaList: 8349 return CreatePowerABIBuiltinVaListDecl(Context); 8350 case TargetInfo::X86_64ABIBuiltinVaList: 8351 return CreateX86_64ABIBuiltinVaListDecl(Context); 8352 case TargetInfo::PNaClABIBuiltinVaList: 8353 return CreatePNaClABIBuiltinVaListDecl(Context); 8354 case TargetInfo::AAPCSABIBuiltinVaList: 8355 return CreateAAPCSABIBuiltinVaListDecl(Context); 8356 case TargetInfo::SystemZBuiltinVaList: 8357 return CreateSystemZBuiltinVaListDecl(Context); 8358 case TargetInfo::HexagonBuiltinVaList: 8359 return CreateHexagonBuiltinVaListDecl(Context); 8360 } 8361 8362 llvm_unreachable("Unhandled __builtin_va_list type kind"); 8363 } 8364 8365 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 8366 if (!BuiltinVaListDecl) { 8367 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 8368 assert(BuiltinVaListDecl->isImplicit()); 8369 } 8370 8371 return BuiltinVaListDecl; 8372 } 8373 8374 Decl *ASTContext::getVaListTagDecl() const { 8375 // Force the creation of VaListTagDecl by building the __builtin_va_list 8376 // declaration. 8377 if (!VaListTagDecl) 8378 (void)getBuiltinVaListDecl(); 8379 8380 return VaListTagDecl; 8381 } 8382 8383 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 8384 if (!BuiltinMSVaListDecl) 8385 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 8386 8387 return BuiltinMSVaListDecl; 8388 } 8389 8390 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 8391 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 8392 } 8393 8394 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 8395 assert(ObjCConstantStringType.isNull() && 8396 "'NSConstantString' type already set!"); 8397 8398 ObjCConstantStringType = getObjCInterfaceType(Decl); 8399 } 8400 8401 /// Retrieve the template name that corresponds to a non-empty 8402 /// lookup. 
8403 TemplateName 8404 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 8405 UnresolvedSetIterator End) const { 8406 unsigned size = End - Begin; 8407 assert(size > 1 && "set is not overloaded!"); 8408 8409 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 8410 size * sizeof(FunctionTemplateDecl*)); 8411 auto *OT = new (memory) OverloadedTemplateStorage(size); 8412 8413 NamedDecl **Storage = OT->getStorage(); 8414 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 8415 NamedDecl *D = *I; 8416 assert(isa<FunctionTemplateDecl>(D) || 8417 isa<UnresolvedUsingValueDecl>(D) || 8418 (isa<UsingShadowDecl>(D) && 8419 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 8420 *Storage++ = D; 8421 } 8422 8423 return TemplateName(OT); 8424 } 8425 8426 /// Retrieve a template name representing an unqualified-id that has been 8427 /// assumed to name a template for ADL purposes. 8428 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 8429 auto *OT = new (*this) AssumedTemplateStorage(Name); 8430 return TemplateName(OT); 8431 } 8432 8433 /// Retrieve the template name that represents a qualified 8434 /// template name such as \c std::vector. 8435 TemplateName 8436 ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 8437 bool TemplateKeyword, 8438 TemplateDecl *Template) const { 8439 assert(NNS && "Missing nested-name-specifier in qualified template name"); 8440 8441 // FIXME: Canonicalization? 8442 llvm::FoldingSetNodeID ID; 8443 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 8444 8445 void *InsertPos = nullptr; 8446 QualifiedTemplateName *QTN = 8447 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8448 if (!QTN) { 8449 QTN = new (*this, alignof(QualifiedTemplateName)) 8450 QualifiedTemplateName(NNS, TemplateKeyword, Template); 8451 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 8452 } 8453 8454 return TemplateName(QTN); 8455 } 8456 8457 /// Retrieve the template name that represents a dependent 8458 /// template name such as \c MetaFun::template apply. 8459 TemplateName 8460 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 8461 const IdentifierInfo *Name) const { 8462 assert((!NNS || NNS->isDependent()) && 8463 "Nested name specifier must be dependent"); 8464 8465 llvm::FoldingSetNodeID ID; 8466 DependentTemplateName::Profile(ID, NNS, Name); 8467 8468 void *InsertPos = nullptr; 8469 DependentTemplateName *QTN = 8470 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8471 8472 if (QTN) 8473 return TemplateName(QTN); 8474 8475 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 8476 if (CanonNNS == NNS) { 8477 QTN = new (*this, alignof(DependentTemplateName)) 8478 DependentTemplateName(NNS, Name); 8479 } else { 8480 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 8481 QTN = new (*this, alignof(DependentTemplateName)) 8482 DependentTemplateName(NNS, Name, Canon); 8483 DependentTemplateName *CheckQTN = 8484 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8485 assert(!CheckQTN && "Dependent type name canonicalization broken"); 8486 (void)CheckQTN; 8487 } 8488 8489 DependentTemplateNames.InsertNode(QTN, InsertPos); 8490 return TemplateName(QTN); 8491 } 8492 8493 /// Retrieve the template name that represents a dependent 8494 /// template name such as \c MetaFun::template operator+. 
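/// For example, the name written as \c T::template operator+ inside a
/// template, where \c T is a template parameter.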
8495 TemplateName 8496 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 8497 OverloadedOperatorKind Operator) const { 8498 assert((!NNS || NNS->isDependent()) && 8499 "Nested name specifier must be dependent"); 8500 8501 llvm::FoldingSetNodeID ID; 8502 DependentTemplateName::Profile(ID, NNS, Operator); 8503 8504 void *InsertPos = nullptr; 8505 DependentTemplateName *QTN 8506 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8507 8508 if (QTN) 8509 return TemplateName(QTN); 8510 8511 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 8512 if (CanonNNS == NNS) { 8513 QTN = new (*this, alignof(DependentTemplateName)) 8514 DependentTemplateName(NNS, Operator); 8515 } else { 8516 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 8517 QTN = new (*this, alignof(DependentTemplateName)) 8518 DependentTemplateName(NNS, Operator, Canon); 8519 8520 DependentTemplateName *CheckQTN 8521 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8522 assert(!CheckQTN && "Dependent template name canonicalization broken"); 8523 (void)CheckQTN; 8524 } 8525 8526 DependentTemplateNames.InsertNode(QTN, InsertPos); 8527 return TemplateName(QTN); 8528 } 8529 8530 TemplateName 8531 ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, 8532 TemplateName replacement) const { 8533 llvm::FoldingSetNodeID ID; 8534 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); 8535 8536 void *insertPos = nullptr; 8537 SubstTemplateTemplateParmStorage *subst 8538 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 8539 8540 if (!subst) { 8541 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); 8542 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 8543 } 8544 8545 return TemplateName(subst); 8546 } 8547 8548 TemplateName 8549 ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, 8550 const TemplateArgument &ArgPack) const { 8551 auto &Self = const_cast<ASTContext &>(*this); 8552 llvm::FoldingSetNodeID ID; 8553 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); 8554 8555 void *InsertPos = nullptr; 8556 SubstTemplateTemplateParmPackStorage *Subst 8557 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 8558 8559 if (!Subst) { 8560 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, 8561 ArgPack.pack_size(), 8562 ArgPack.pack_begin()); 8563 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 8564 } 8565 8566 return TemplateName(Subst); 8567 } 8568 8569 /// getFromTargetType - Given one of the integer types provided by 8570 /// TargetInfo, produce the corresponding type. The unsigned @p Type 8571 /// is actually a value of type @c TargetInfo::IntType. 
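/// For example, TargetInfo::UnsignedLong maps to UnsignedLongTy, and
/// TargetInfo::NoInt yields a null CanQualType.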
8572 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 8573 switch (Type) { 8574 case TargetInfo::NoInt: return {}; 8575 case TargetInfo::SignedChar: return SignedCharTy; 8576 case TargetInfo::UnsignedChar: return UnsignedCharTy; 8577 case TargetInfo::SignedShort: return ShortTy; 8578 case TargetInfo::UnsignedShort: return UnsignedShortTy; 8579 case TargetInfo::SignedInt: return IntTy; 8580 case TargetInfo::UnsignedInt: return UnsignedIntTy; 8581 case TargetInfo::SignedLong: return LongTy; 8582 case TargetInfo::UnsignedLong: return UnsignedLongTy; 8583 case TargetInfo::SignedLongLong: return LongLongTy; 8584 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 8585 } 8586 8587 llvm_unreachable("Unhandled TargetInfo::IntType value"); 8588 } 8589 8590 //===----------------------------------------------------------------------===// 8591 // Type Predicates. 8592 //===----------------------------------------------------------------------===// 8593 8594 /// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, Objective-C's 8595 /// garbage collection attributes, for the given type. 8596 /// 8597 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 8598 if (getLangOpts().getGC() == LangOptions::NonGC) 8599 return Qualifiers::GCNone; 8600 8601 assert(getLangOpts().ObjC); 8602 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 8603 8604 // Default behaviour under Objective-C's GC is for ObjC pointers 8605 // (or pointers to them) to be treated as though they were declared 8606 // as __strong. 8607 if (GCAttrs == Qualifiers::GCNone) { 8608 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 8609 return Qualifiers::Strong; 8610 else if (Ty->isPointerType()) 8611 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 8612 } else { 8613 // It's not valid to set GC attributes on anything that isn't a 8614 // pointer. 8615 #ifndef NDEBUG 8616 QualType CT = Ty->getCanonicalTypeInternal(); 8617 while (const auto *AT = dyn_cast<ArrayType>(CT)) 8618 CT = AT->getElementType(); 8619 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 8620 #endif 8621 } 8622 return GCAttrs; 8623 } 8624 8625 //===----------------------------------------------------------------------===// 8626 // Type Compatibility Testing 8627 //===----------------------------------------------------------------------===// 8628 8629 /// areCompatVectorTypes - Return true if the two specified vector types are 8630 /// compatible. 8631 static bool areCompatVectorTypes(const VectorType *LHS, 8632 const VectorType *RHS) { 8633 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 8634 return LHS->getElementType() == RHS->getElementType() && 8635 LHS->getNumElements() == RHS->getNumElements(); 8636 } 8637 8638 /// areCompatMatrixTypes - Return true if the two specified matrix types are 8639 /// compatible.
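/// For example, two 4x4 matrices of float are compatible, while a 4x4 and a
/// 4x3 matrix of float are not.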
8640 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 8641 const ConstantMatrixType *RHS) { 8642 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 8643 return LHS->getElementType() == RHS->getElementType() && 8644 LHS->getNumRows() == RHS->getNumRows() && 8645 LHS->getNumColumns() == RHS->getNumColumns(); 8646 } 8647 8648 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 8649 QualType SecondVec) { 8650 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 8651 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 8652 8653 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 8654 return true; 8655 8656 // Treat Neon vector types and most AltiVec vector types as if they are the 8657 // equivalent GCC vector types. 8658 const auto *First = FirstVec->castAs<VectorType>(); 8659 const auto *Second = SecondVec->castAs<VectorType>(); 8660 if (First->getNumElements() == Second->getNumElements() && 8661 hasSameType(First->getElementType(), Second->getElementType()) && 8662 First->getVectorKind() != VectorType::AltiVecPixel && 8663 First->getVectorKind() != VectorType::AltiVecBool && 8664 Second->getVectorKind() != VectorType::AltiVecPixel && 8665 Second->getVectorKind() != VectorType::AltiVecBool && 8666 First->getVectorKind() != VectorType::SveFixedLengthDataVector && 8667 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 8668 Second->getVectorKind() != VectorType::SveFixedLengthDataVector && 8669 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) 8670 return true; 8671 8672 return false; 8673 } 8674 8675 /// getSVETypeSize - Return SVE vector or predicate register size. 8676 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 8677 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); 8678 return Ty->getKind() == BuiltinType::SveBool 8679 ? Context.getLangOpts().ArmSveVectorBits / Context.getCharWidth() 8680 : Context.getLangOpts().ArmSveVectorBits; 8681 } 8682 8683 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 8684 QualType SecondType) { 8685 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 8686 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 8687 "Expected SVE builtin type and vector type!"); 8688 8689 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 8690 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 8691 if (const auto *VT = SecondType->getAs<VectorType>()) { 8692 // Predicates have the same representation as uint8 so we also have to 8693 // check the kind to make these types incompatible. 
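        // (That is, an svbool_t only matches a fixed-length *predicate*
        // vector; a fixed-length *data* vector must instead match the
        // builtin's element type, as checked below.)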
8694 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 8695 return BT->getKind() == BuiltinType::SveBool; 8696 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 8697 return VT->getElementType().getCanonicalType() == 8698 FirstType->getSveEltType(*this); 8699 else if (VT->getVectorKind() == VectorType::GenericVector) 8700 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 8701 hasSameType(VT->getElementType(), 8702 getBuiltinVectorTypeInfo(BT).ElementType); 8703 } 8704 } 8705 return false; 8706 }; 8707 8708 return IsValidCast(FirstType, SecondType) || 8709 IsValidCast(SecondType, FirstType); 8710 } 8711 8712 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 8713 QualType SecondType) { 8714 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 8715 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 8716 "Expected SVE builtin type and vector type!"); 8717 8718 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 8719 const auto *BT = FirstType->getAs<BuiltinType>(); 8720 if (!BT) 8721 return false; 8722 8723 const auto *VecTy = SecondType->getAs<VectorType>(); 8724 if (VecTy && 8725 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || 8726 VecTy->getVectorKind() == VectorType::GenericVector)) { 8727 const LangOptions::LaxVectorConversionKind LVCKind = 8728 getLangOpts().getLaxVectorConversions(); 8729 8730 // Can not convert between sve predicates and sve vectors because of 8731 // different size. 8732 if (BT->getKind() == BuiltinType::SveBool && 8733 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) 8734 return false; 8735 8736 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 8737 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 8738 // converts to VLAT and VLAT implicitly converts to GNUT." 8739 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 8740 // predicates. 8741 if (VecTy->getVectorKind() == VectorType::GenericVector && 8742 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 8743 return false; 8744 8745 // If -flax-vector-conversions=all is specified, the types are 8746 // certainly compatible. 8747 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 8748 return true; 8749 8750 // If -flax-vector-conversions=integer is specified, the types are 8751 // compatible if the elements are integer types. 8752 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 8753 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 8754 FirstType->getSveEltType(*this)->isIntegerType(); 8755 } 8756 8757 return false; 8758 }; 8759 8760 return IsLaxCompatible(FirstType, SecondType) || 8761 IsLaxCompatible(SecondType, FirstType); 8762 } 8763 8764 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 8765 while (true) { 8766 // __strong id 8767 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 8768 if (Attr->getAttrKind() == attr::ObjCOwnership) 8769 return true; 8770 8771 Ty = Attr->getModifiedType(); 8772 8773 // X *__strong (...) 8774 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 8775 Ty = Paren->getInnerType(); 8776 8777 // We do not want to look through typedefs, typeof(expr), 8778 // typeof(type), or any other way that the type is somehow 8779 // abstracted. 
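      // (For instance, given `typedef __strong id StrongID;`, a use of
      // StrongID is not treated as carrying a direct ownership qualifier.)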
8780 } else { 8781 return false; 8782 } 8783 } 8784 } 8785 8786 //===----------------------------------------------------------------------===// 8787 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 8788 //===----------------------------------------------------------------------===// 8789 8790 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 8791 /// inheritance hierarchy of 'rProto'. 8792 bool 8793 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 8794 ObjCProtocolDecl *rProto) const { 8795 if (declaresSameEntity(lProto, rProto)) 8796 return true; 8797 for (auto *PI : rProto->protocols()) 8798 if (ProtocolCompatibleWithProtocol(lProto, PI)) 8799 return true; 8800 return false; 8801 } 8802 8803 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 8804 /// Class<pr1, ...>. 8805 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 8806 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 8807 for (auto *lhsProto : lhs->quals()) { 8808 bool match = false; 8809 for (auto *rhsProto : rhs->quals()) { 8810 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 8811 match = true; 8812 break; 8813 } 8814 } 8815 if (!match) 8816 return false; 8817 } 8818 return true; 8819 } 8820 8821 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 8822 /// ObjCQualifiedIDType. 8823 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 8824 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 8825 bool compare) { 8826 // Allow id<P..> and an 'id' in all cases. 8827 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 8828 return true; 8829 8830 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 8831 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 8832 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 8833 return false; 8834 8835 if (lhs->isObjCQualifiedIdType()) { 8836 if (rhs->qual_empty()) { 8837 // If the RHS is a unqualified interface pointer "NSString*", 8838 // make sure we check the class hierarchy. 8839 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 8840 for (auto *I : lhs->quals()) { 8841 // when comparing an id<P> on lhs with a static type on rhs, 8842 // see if static class implements all of id's protocols, directly or 8843 // through its super class and categories. 8844 if (!rhsID->ClassImplementsProtocol(I, true)) 8845 return false; 8846 } 8847 } 8848 // If there are no qualifiers and no interface, we have an 'id'. 8849 return true; 8850 } 8851 // Both the right and left sides have qualifiers. 8852 for (auto *lhsProto : lhs->quals()) { 8853 bool match = false; 8854 8855 // when comparing an id<P> on lhs with a static type on rhs, 8856 // see if static class implements all of id's protocols, directly or 8857 // through its super class and categories. 8858 for (auto *rhsProto : rhs->quals()) { 8859 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 8860 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 8861 match = true; 8862 break; 8863 } 8864 } 8865 // If the RHS is a qualified interface pointer "NSString<P>*", 8866 // make sure we check the class hierarchy. 8867 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 8868 for (auto *I : lhs->quals()) { 8869 // when comparing an id<P> on lhs with a static type on rhs, 8870 // see if static class implements all of id's protocols, directly or 8871 // through its super class and categories. 
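          // (e.g., assuming Foundation's declarations, an NSString<NSCoding> *
          // value satisfies an id<NSCopying> lvalue because NSString itself
          // adopts NSCopying, even though NSCopying is not in the pointer's
          // explicit qualifier list.)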
8872 if (rhsID->ClassImplementsProtocol(I, true)) { 8873 match = true; 8874 break; 8875 } 8876 } 8877 } 8878 if (!match) 8879 return false; 8880 } 8881 8882 return true; 8883 } 8884 8885 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 8886 8887 if (lhs->getInterfaceType()) { 8888 // If both the right and left sides have qualifiers. 8889 for (auto *lhsProto : lhs->quals()) { 8890 bool match = false; 8891 8892 // when comparing an id<P> on rhs with a static type on lhs, 8893 // see if static class implements all of id's protocols, directly or 8894 // through its super class and categories. 8895 // First, lhs protocols in the qualifier list must be found, direct 8896 // or indirect in rhs's qualifier list or it is a mismatch. 8897 for (auto *rhsProto : rhs->quals()) { 8898 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 8899 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 8900 match = true; 8901 break; 8902 } 8903 } 8904 if (!match) 8905 return false; 8906 } 8907 8908 // Static class's protocols, or its super class or category protocols 8909 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 8910 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 8911 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 8912 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 8913 // This is rather dubious but matches gcc's behavior. If lhs has 8914 // no type qualifier and its class has no static protocol(s) 8915 // assume that it is mismatch. 8916 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 8917 return false; 8918 for (auto *lhsProto : LHSInheritedProtocols) { 8919 bool match = false; 8920 for (auto *rhsProto : rhs->quals()) { 8921 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 8922 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 8923 match = true; 8924 break; 8925 } 8926 } 8927 if (!match) 8928 return false; 8929 } 8930 } 8931 return true; 8932 } 8933 return false; 8934 } 8935 8936 /// canAssignObjCInterfaces - Return true if the two interface types are 8937 /// compatible for assignment from RHS to LHS. This handles validation of any 8938 /// protocol qualifiers on the LHS or RHS. 8939 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 8940 const ObjCObjectPointerType *RHSOPT) { 8941 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 8942 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 8943 8944 // If either type represents the built-in 'id' type, return true. 8945 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 8946 return true; 8947 8948 // Function object that propagates a successful result or handles 8949 // __kindof types. 8950 auto finish = [&](bool succeeded) -> bool { 8951 if (succeeded) 8952 return true; 8953 8954 if (!RHS->isKindOfType()) 8955 return false; 8956 8957 // Strip off __kindof and protocol qualifiers, then check whether 8958 // we can assign the other way. 8959 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 8960 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 8961 }; 8962 8963 // Casts from or to id<P> are allowed when the other side has compatible 8964 // protocols. 8965 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 8966 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 8967 } 8968 8969 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 
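  // (e.g. a Class<NSCopying> lvalue may be assigned a Class<NSCopying,
  // NSCoding> value, since every LHS protocol is satisfied by the RHS, but
  // not the other way around.)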
8970 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 8971 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 8972 } 8973 8974 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 8975 if (LHS->isObjCClass() && RHS->isObjCClass()) { 8976 return true; 8977 } 8978 8979 // If we have 2 user-defined types, fall into that path. 8980 if (LHS->getInterface() && RHS->getInterface()) { 8981 return finish(canAssignObjCInterfaces(LHS, RHS)); 8982 } 8983 8984 return false; 8985 } 8986 8987 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 8988 /// for providing type-safety for objective-c pointers used to pass/return 8989 /// arguments in block literals. When passed as arguments, passing 'A*' where 8990 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 8991 /// not OK. For the return type, the opposite is not OK. 8992 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 8993 const ObjCObjectPointerType *LHSOPT, 8994 const ObjCObjectPointerType *RHSOPT, 8995 bool BlockReturnType) { 8996 8997 // Function object that propagates a successful result or handles 8998 // __kindof types. 8999 auto finish = [&](bool succeeded) -> bool { 9000 if (succeeded) 9001 return true; 9002 9003 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9004 if (!Expected->isKindOfType()) 9005 return false; 9006 9007 // Strip off __kindof and protocol qualifiers, then check whether 9008 // we can assign the other way. 9009 return canAssignObjCInterfacesInBlockPointer( 9010 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9011 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9012 BlockReturnType); 9013 }; 9014 9015 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9016 return true; 9017 9018 if (LHSOPT->isObjCBuiltinType()) { 9019 return finish(RHSOPT->isObjCBuiltinType() || 9020 RHSOPT->isObjCQualifiedIdType()); 9021 } 9022 9023 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9024 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9025 // Use for block parameters previous type checking for compatibility. 9026 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9027 // Or corrected type checking as in non-compat mode. 9028 (!BlockReturnType && 9029 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9030 else 9031 return finish(ObjCQualifiedIdTypesAreCompatible( 9032 (BlockReturnType ? LHSOPT : RHSOPT), 9033 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9034 } 9035 9036 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9037 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9038 if (LHS && RHS) { // We have 2 user-defined types. 9039 if (LHS != RHS) { 9040 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9041 return finish(BlockReturnType); 9042 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9043 return finish(!BlockReturnType); 9044 } 9045 else 9046 return true; 9047 } 9048 return false; 9049 } 9050 9051 /// Comparison routine for Objective-C protocols to be used with 9052 /// llvm::array_pod_sort. 9053 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9054 ObjCProtocolDecl * const *rhs) { 9055 return (*lhs)->getName().compare((*rhs)->getName()); 9056 } 9057 9058 /// getIntersectionOfProtocols - This routine finds the intersection of set 9059 /// of protocols inherited from two distinct objective-c pointer objects with 9060 /// the given common base. 
9061 /// It is used to build composite qualifier list of the composite type of 9062 /// the conditional expression involving two objective-c pointer objects. 9063 static 9064 void getIntersectionOfProtocols(ASTContext &Context, 9065 const ObjCInterfaceDecl *CommonBase, 9066 const ObjCObjectPointerType *LHSOPT, 9067 const ObjCObjectPointerType *RHSOPT, 9068 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9069 9070 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9071 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9072 assert(LHS->getInterface() && "LHS must have an interface base"); 9073 assert(RHS->getInterface() && "RHS must have an interface base"); 9074 9075 // Add all of the protocols for the LHS. 9076 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9077 9078 // Start with the protocol qualifiers. 9079 for (auto proto : LHS->quals()) { 9080 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9081 } 9082 9083 // Also add the protocols associated with the LHS interface. 9084 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9085 9086 // Add all of the protocols for the RHS. 9087 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9088 9089 // Start with the protocol qualifiers. 9090 for (auto proto : RHS->quals()) { 9091 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9092 } 9093 9094 // Also add the protocols associated with the RHS interface. 9095 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9096 9097 // Compute the intersection of the collected protocol sets. 9098 for (auto proto : LHSProtocolSet) { 9099 if (RHSProtocolSet.count(proto)) 9100 IntersectionSet.push_back(proto); 9101 } 9102 9103 // Compute the set of protocols that is implied by either the common type or 9104 // the protocols within the intersection. 9105 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9106 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9107 9108 // Remove any implied protocols from the list of inherited protocols. 9109 if (!ImpliedProtocols.empty()) { 9110 IntersectionSet.erase( 9111 std::remove_if(IntersectionSet.begin(), 9112 IntersectionSet.end(), 9113 [&](ObjCProtocolDecl *proto) -> bool { 9114 return ImpliedProtocols.count(proto) > 0; 9115 }), 9116 IntersectionSet.end()); 9117 } 9118 9119 // Sort the remaining protocols by name. 9120 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9121 compareObjCProtocolsByName); 9122 } 9123 9124 /// Determine whether the first type is a subtype of the second. 9125 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9126 QualType rhs) { 9127 // Common case: two object pointers. 9128 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9129 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9130 if (lhsOPT && rhsOPT) 9131 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9132 9133 // Two block pointers. 9134 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9135 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9136 if (lhsBlock && rhsBlock) 9137 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9138 9139 // If either is an unqualified 'id' and the other is a block, it's 9140 // acceptable. 9141 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9142 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9143 return true; 9144 9145 return false; 9146 } 9147 9148 // Check that the given Objective-C type argument lists are equivalent. 
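// (e.g. for a __covariant type parameter such as NSArray's ObjectType, an
// NSMutableString * argument is accepted where NSString * is expected, while
// an invariant parameter requires identical arguments up to __kindof
// stripping.)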
9149 static bool sameObjCTypeArgs(ASTContext &ctx, 9150 const ObjCInterfaceDecl *iface, 9151 ArrayRef<QualType> lhsArgs, 9152 ArrayRef<QualType> rhsArgs, 9153 bool stripKindOf) { 9154 if (lhsArgs.size() != rhsArgs.size()) 9155 return false; 9156 9157 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9158 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9159 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9160 continue; 9161 9162 switch (typeParams->begin()[i]->getVariance()) { 9163 case ObjCTypeParamVariance::Invariant: 9164 if (!stripKindOf || 9165 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9166 rhsArgs[i].stripObjCKindOfType(ctx))) { 9167 return false; 9168 } 9169 break; 9170 9171 case ObjCTypeParamVariance::Covariant: 9172 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9173 return false; 9174 break; 9175 9176 case ObjCTypeParamVariance::Contravariant: 9177 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9178 return false; 9179 break; 9180 } 9181 } 9182 9183 return true; 9184 } 9185 9186 QualType ASTContext::areCommonBaseCompatible( 9187 const ObjCObjectPointerType *Lptr, 9188 const ObjCObjectPointerType *Rptr) { 9189 const ObjCObjectType *LHS = Lptr->getObjectType(); 9190 const ObjCObjectType *RHS = Rptr->getObjectType(); 9191 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 9192 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 9193 9194 if (!LDecl || !RDecl) 9195 return {}; 9196 9197 // When either LHS or RHS is a kindof type, we should return a kindof type. 9198 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 9199 // kindof(A). 9200 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 9201 9202 // Follow the left-hand side up the class hierarchy until we either hit a 9203 // root or find the RHS. Record the ancestors in case we don't find it. 9204 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 9205 LHSAncestors; 9206 while (true) { 9207 // Record this ancestor. We'll need this if the common type isn't in the 9208 // path from the LHS to the root. 9209 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 9210 9211 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 9212 // Get the type arguments. 9213 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 9214 bool anyChanges = false; 9215 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9216 // Both have type arguments, compare them. 9217 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9218 LHS->getTypeArgs(), RHS->getTypeArgs(), 9219 /*stripKindOf=*/true)) 9220 return {}; 9221 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9222 // If only one has type arguments, the result will not have type 9223 // arguments. 9224 LHSTypeArgs = {}; 9225 anyChanges = true; 9226 } 9227 9228 // Compute the intersection of protocols. 9229 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9230 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 9231 Protocols); 9232 if (!Protocols.empty()) 9233 anyChanges = true; 9234 9235 // If anything in the LHS will have changed, build a new result type. 9236 // If we need to return a kindof type but LHS is not a kindof type, we 9237 // build a new result type. 
9238 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 9239 QualType Result = getObjCInterfaceType(LHS->getInterface()); 9240 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 9241 anyKindOf || LHS->isKindOfType()); 9242 return getObjCObjectPointerType(Result); 9243 } 9244 9245 return getObjCObjectPointerType(QualType(LHS, 0)); 9246 } 9247 9248 // Find the superclass. 9249 QualType LHSSuperType = LHS->getSuperClassType(); 9250 if (LHSSuperType.isNull()) 9251 break; 9252 9253 LHS = LHSSuperType->castAs<ObjCObjectType>(); 9254 } 9255 9256 // We didn't find anything by following the LHS to its root; now check 9257 // the RHS against the cached set of ancestors. 9258 while (true) { 9259 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 9260 if (KnownLHS != LHSAncestors.end()) { 9261 LHS = KnownLHS->second; 9262 9263 // Get the type arguments. 9264 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 9265 bool anyChanges = false; 9266 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9267 // Both have type arguments, compare them. 9268 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9269 LHS->getTypeArgs(), RHS->getTypeArgs(), 9270 /*stripKindOf=*/true)) 9271 return {}; 9272 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9273 // If only one has type arguments, the result will not have type 9274 // arguments. 9275 RHSTypeArgs = {}; 9276 anyChanges = true; 9277 } 9278 9279 // Compute the intersection of protocols. 9280 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9281 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 9282 Protocols); 9283 if (!Protocols.empty()) 9284 anyChanges = true; 9285 9286 // If we need to return a kindof type but RHS is not a kindof type, we 9287 // build a new result type. 9288 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 9289 QualType Result = getObjCInterfaceType(RHS->getInterface()); 9290 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 9291 anyKindOf || RHS->isKindOfType()); 9292 return getObjCObjectPointerType(Result); 9293 } 9294 9295 return getObjCObjectPointerType(QualType(RHS, 0)); 9296 } 9297 9298 // Find the superclass of the RHS. 9299 QualType RHSSuperType = RHS->getSuperClassType(); 9300 if (RHSSuperType.isNull()) 9301 break; 9302 9303 RHS = RHSSuperType->castAs<ObjCObjectType>(); 9304 } 9305 9306 return {}; 9307 } 9308 9309 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 9310 const ObjCObjectType *RHS) { 9311 assert(LHS->getInterface() && "LHS is not an interface type"); 9312 assert(RHS->getInterface() && "RHS is not an interface type"); 9313 9314 // Verify that the base decls are compatible: the RHS must be a subclass of 9315 // the LHS. 9316 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 9317 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 9318 if (!IsSuperClass) 9319 return false; 9320 9321 // If the LHS has protocol qualifiers, determine whether all of them are 9322 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 9323 // LHS). 9324 if (LHS->getNumProtocols() > 0) { 9325 // OK if conversion of LHS to SuperClass results in narrowing of types 9326 // ; i.e., SuperClass may implement at least one of the protocols 9327 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 9328 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 
9329 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols; 9330 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); 9331 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's 9332 // qualifiers. 9333 for (auto *RHSPI : RHS->quals()) 9334 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols); 9335 // If there are no protocols associated with RHS, it is not a match. 9336 if (SuperClassInheritedProtocols.empty()) 9337 return false; 9338 9339 for (const auto *LHSProto : LHS->quals()) { 9340 bool SuperImplementsProtocol = false; 9341 for (auto *SuperClassProto : SuperClassInheritedProtocols) 9342 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { 9343 SuperImplementsProtocol = true; 9344 break; 9345 } 9346 if (!SuperImplementsProtocol) 9347 return false; 9348 } 9349 } 9350 9351 // If the LHS is specialized, we may need to check type arguments. 9352 if (LHS->isSpecialized()) { 9353 // Follow the superclass chain until we've matched the LHS class in the 9354 // hierarchy. This substitutes type arguments through. 9355 const ObjCObjectType *RHSSuper = RHS; 9356 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface)) 9357 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>(); 9358 9359 // If the RHS is specialized, compare type arguments. 9360 if (RHSSuper->isSpecialized() && 9361 !sameObjCTypeArgs(*this, LHS->getInterface(), 9362 LHS->getTypeArgs(), RHSSuper->getTypeArgs(), 9363 /*stripKindOf=*/true)) { 9364 return false; 9365 } 9366 } 9367 9368 return true; 9369 } 9370 9371 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { 9372 // get the "pointed to" types 9373 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); 9374 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); 9375 9376 if (!LHSOPT || !RHSOPT) 9377 return false; 9378 9379 return canAssignObjCInterfaces(LHSOPT, RHSOPT) || 9380 canAssignObjCInterfaces(RHSOPT, LHSOPT); 9381 } 9382 9383 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { 9384 return canAssignObjCInterfaces( 9385 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(), 9386 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>()); 9387 } 9388 9389 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, 9390 /// both shall have the identically qualified version of a compatible type. 9391 /// C99 6.2.7p1: Two types have compatible types if their types are the 9392 /// same. See 6.7.[2,3,5] for additional rules.
9393 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 9394 bool CompareUnqualified) { 9395 if (getLangOpts().CPlusPlus) 9396 return hasSameType(LHS, RHS); 9397 9398 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 9399 } 9400 9401 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 9402 return typesAreCompatible(LHS, RHS); 9403 } 9404 9405 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 9406 return !mergeTypes(LHS, RHS, true).isNull(); 9407 } 9408 9409 /// mergeTransparentUnionType - if T is a transparent union type and a member 9410 /// of T is compatible with SubType, return the merged type, else return 9411 /// QualType() 9412 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 9413 bool OfBlockPointer, 9414 bool Unqualified) { 9415 if (const RecordType *UT = T->getAsUnionType()) { 9416 RecordDecl *UD = UT->getDecl(); 9417 if (UD->hasAttr<TransparentUnionAttr>()) { 9418 for (const auto *I : UD->fields()) { 9419 QualType ET = I->getType().getUnqualifiedType(); 9420 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 9421 if (!MT.isNull()) 9422 return MT; 9423 } 9424 } 9425 } 9426 9427 return {}; 9428 } 9429 9430 /// mergeFunctionParameterTypes - merge two types which appear as function 9431 /// parameter types 9432 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 9433 bool OfBlockPointer, 9434 bool Unqualified) { 9435 // GNU extension: two types are compatible if they appear as a function 9436 // argument, one of the types is a transparent union type and the other 9437 // type is compatible with a union member 9438 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 9439 Unqualified); 9440 if (!lmerge.isNull()) 9441 return lmerge; 9442 9443 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 9444 Unqualified); 9445 if (!rmerge.isNull()) 9446 return rmerge; 9447 9448 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 9449 } 9450 9451 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 9452 bool OfBlockPointer, bool Unqualified, 9453 bool AllowCXX) { 9454 const auto *lbase = lhs->castAs<FunctionType>(); 9455 const auto *rbase = rhs->castAs<FunctionType>(); 9456 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 9457 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 9458 bool allLTypes = true; 9459 bool allRTypes = true; 9460 9461 // Check return type 9462 QualType retType; 9463 if (OfBlockPointer) { 9464 QualType RHS = rbase->getReturnType(); 9465 QualType LHS = lbase->getReturnType(); 9466 bool UnqualifiedResult = Unqualified; 9467 if (!UnqualifiedResult) 9468 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 9469 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 9470 } 9471 else 9472 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 9473 Unqualified); 9474 if (retType.isNull()) 9475 return {}; 9476 9477 if (Unqualified) 9478 retType = retType.getUnqualifiedType(); 9479 9480 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 9481 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 9482 if (Unqualified) { 9483 LRetType = LRetType.getUnqualifiedType(); 9484 RRetType = RRetType.getUnqualifiedType(); 9485 } 9486 9487 if (getCanonicalType(retType) != LRetType) 9488 allLTypes = false; 9489 if (getCanonicalType(retType) != RRetType) 9490 allRTypes = false; 9491 9492 // FIXME: double check this 
9493 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 9494 // rbase->getRegParmAttr() != 0 && 9495 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 9496 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 9497 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 9498 9499 // Compatible functions must have compatible calling conventions 9500 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 9501 return {}; 9502 9503 // Regparm is part of the calling convention. 9504 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 9505 return {}; 9506 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 9507 return {}; 9508 9509 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 9510 return {}; 9511 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 9512 return {}; 9513 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 9514 return {}; 9515 9516 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 9517 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 9518 9519 if (lbaseInfo.getNoReturn() != NoReturn) 9520 allLTypes = false; 9521 if (rbaseInfo.getNoReturn() != NoReturn) 9522 allRTypes = false; 9523 9524 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 9525 9526 if (lproto && rproto) { // two C99 style function prototypes 9527 assert((AllowCXX || 9528 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 9529 "C++ shouldn't be here"); 9530 // Compatible functions must have the same number of parameters 9531 if (lproto->getNumParams() != rproto->getNumParams()) 9532 return {}; 9533 9534 // Variadic and non-variadic functions aren't compatible 9535 if (lproto->isVariadic() != rproto->isVariadic()) 9536 return {}; 9537 9538 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 9539 return {}; 9540 9541 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 9542 bool canUseLeft, canUseRight; 9543 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 9544 newParamInfos)) 9545 return {}; 9546 9547 if (!canUseLeft) 9548 allLTypes = false; 9549 if (!canUseRight) 9550 allRTypes = false; 9551 9552 // Check parameter type compatibility 9553 SmallVector<QualType, 10> types; 9554 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 9555 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 9556 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 9557 QualType paramType = mergeFunctionParameterTypes( 9558 lParamType, rParamType, OfBlockPointer, Unqualified); 9559 if (paramType.isNull()) 9560 return {}; 9561 9562 if (Unqualified) 9563 paramType = paramType.getUnqualifiedType(); 9564 9565 types.push_back(paramType); 9566 if (Unqualified) { 9567 lParamType = lParamType.getUnqualifiedType(); 9568 rParamType = rParamType.getUnqualifiedType(); 9569 } 9570 9571 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 9572 allLTypes = false; 9573 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 9574 allRTypes = false; 9575 } 9576 9577 if (allLTypes) return lhs; 9578 if (allRTypes) return rhs; 9579 9580 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 9581 EPI.ExtInfo = einfo; 9582 EPI.ExtParameterInfos = 9583 newParamInfos.empty() ? nullptr : newParamInfos.data(); 9584 return getFunctionType(retType, types, EPI); 9585 } 9586 9587 if (lproto) allRTypes = false; 9588 if (rproto) allLTypes = false; 9589 9590 const FunctionProtoType *proto = lproto ? 
lproto : rproto; 9591 if (proto) { 9592 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 9593 if (proto->isVariadic()) 9594 return {}; 9595 // Check that the types are compatible with the types that 9596 // would result from default argument promotions (C99 6.7.5.3p15). 9597 // The only types actually affected are promotable integer 9598 // types and floats, which would be passed as a different 9599 // type depending on whether the prototype is visible. 9600 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 9601 QualType paramTy = proto->getParamType(i); 9602 9603 // Look at the converted type of enum types, since that is the type used 9604 // to pass enum values. 9605 if (const auto *Enum = paramTy->getAs<EnumType>()) { 9606 paramTy = Enum->getDecl()->getIntegerType(); 9607 if (paramTy.isNull()) 9608 return {}; 9609 } 9610 9611 if (paramTy->isPromotableIntegerType() || 9612 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 9613 return {}; 9614 } 9615 9616 if (allLTypes) return lhs; 9617 if (allRTypes) return rhs; 9618 9619 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 9620 EPI.ExtInfo = einfo; 9621 return getFunctionType(retType, proto->getParamTypes(), EPI); 9622 } 9623 9624 if (allLTypes) return lhs; 9625 if (allRTypes) return rhs; 9626 return getFunctionNoProtoType(retType, einfo); 9627 } 9628 9629 /// Given that we have an enum type and a non-enum type, try to merge them. 9630 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 9631 QualType other, bool isBlockReturnType) { 9632 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 9633 // a signed integer type, or an unsigned integer type. 9634 // Compatibility is based on the underlying type, not the promotion 9635 // type. 9636 QualType underlyingType = ET->getDecl()->getIntegerType(); 9637 if (underlyingType.isNull()) 9638 return {}; 9639 if (Context.hasSameType(underlyingType, other)) 9640 return other; 9641 9642 // In block return types, we're more permissive and accept any 9643 // integral type of the same size. 9644 if (isBlockReturnType && other->isIntegerType() && 9645 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 9646 return other; 9647 9648 return {}; 9649 } 9650 9651 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, 9652 bool OfBlockPointer, 9653 bool Unqualified, bool BlockReturnType) { 9654 // For C++ we will not reach this code with reference types (see below), 9655 // for OpenMP variant call overloading we might. 9656 // 9657 // C++ [expr]: If an expression initially has the type "reference to T", the 9658 // type is adjusted to "T" prior to any further analysis, the expression 9659 // designates the object or function denoted by the reference, and the 9660 // expression is an lvalue unless the reference is an rvalue reference and 9661 // the expression is a function call (possibly inside parentheses). 
9662 if (LangOpts.OpenMP && LHS->getAs<ReferenceType>() && 9663 RHS->getAs<ReferenceType>() && LHS->getTypeClass() == RHS->getTypeClass()) 9664 return mergeTypes(LHS->getAs<ReferenceType>()->getPointeeType(), 9665 RHS->getAs<ReferenceType>()->getPointeeType(), 9666 OfBlockPointer, Unqualified, BlockReturnType); 9667 if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>()) 9668 return {}; 9669 9670 if (Unqualified) { 9671 LHS = LHS.getUnqualifiedType(); 9672 RHS = RHS.getUnqualifiedType(); 9673 } 9674 9675 QualType LHSCan = getCanonicalType(LHS), 9676 RHSCan = getCanonicalType(RHS); 9677 9678 // If two types are identical, they are compatible. 9679 if (LHSCan == RHSCan) 9680 return LHS; 9681 9682 // If the qualifiers are different, the types aren't compatible... mostly. 9683 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 9684 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 9685 if (LQuals != RQuals) { 9686 // If any of these qualifiers are different, we have a type 9687 // mismatch. 9688 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 9689 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 9690 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 9691 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 9692 return {}; 9693 9694 // Exactly one GC qualifier difference is allowed: __strong is 9695 // okay if the other type has no GC qualifier but is an Objective 9696 // C object pointer (i.e. implicitly strong by default). We fix 9697 // this by pretending that the unqualified type was actually 9698 // qualified __strong. 9699 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 9700 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 9701 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 9702 9703 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 9704 return {}; 9705 9706 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 9707 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 9708 } 9709 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 9710 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 9711 } 9712 return {}; 9713 } 9714 9715 // Okay, qualifiers are equal. 9716 9717 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 9718 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 9719 9720 // We want to consider the two function types to be the same for these 9721 // comparisons, just force one to the other. 9722 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 9723 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 9724 9725 // Same as above for arrays 9726 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 9727 LHSClass = Type::ConstantArray; 9728 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 9729 RHSClass = Type::ConstantArray; 9730 9731 // ObjCInterfaces are just specialized ObjCObjects. 9732 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 9733 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 9734 9735 // Canonicalize ExtVector -> Vector. 9736 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 9737 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 9738 9739 // If the canonical type classes don't match. 9740 if (LHSClass != RHSClass) { 9741 // Note that we only have special rules for turning block enum 9742 // returns into block int returns, not vice-versa. 
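    // For example, in C an 'enum E' whose underlying type is 'int' merges with
    // a plain 'int'; the merged type is the non-enum side ('int').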
9743 if (const auto *ETy = LHS->getAs<EnumType>()) { 9744 return mergeEnumWithInteger(*this, ETy, RHS, false); 9745 } 9746 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 9747 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 9748 } 9749 // allow block pointer type to match an 'id' type. 9750 if (OfBlockPointer && !BlockReturnType) { 9751 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 9752 return LHS; 9753 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 9754 return RHS; 9755 } 9756 9757 return {}; 9758 } 9759 9760 // The canonical type classes match. 9761 switch (LHSClass) { 9762 #define TYPE(Class, Base) 9763 #define ABSTRACT_TYPE(Class, Base) 9764 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 9765 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 9766 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 9767 #include "clang/AST/TypeNodes.inc" 9768 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 9769 9770 case Type::Auto: 9771 case Type::DeducedTemplateSpecialization: 9772 case Type::LValueReference: 9773 case Type::RValueReference: 9774 case Type::MemberPointer: 9775 llvm_unreachable("C++ should never be in mergeTypes"); 9776 9777 case Type::ObjCInterface: 9778 case Type::IncompleteArray: 9779 case Type::VariableArray: 9780 case Type::FunctionProto: 9781 case Type::ExtVector: 9782 llvm_unreachable("Types are eliminated above"); 9783 9784 case Type::Pointer: 9785 { 9786 // Merge two pointer types, while trying to preserve typedef info 9787 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 9788 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 9789 if (Unqualified) { 9790 LHSPointee = LHSPointee.getUnqualifiedType(); 9791 RHSPointee = RHSPointee.getUnqualifiedType(); 9792 } 9793 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 9794 Unqualified); 9795 if (ResultType.isNull()) 9796 return {}; 9797 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 9798 return LHS; 9799 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 9800 return RHS; 9801 return getPointerType(ResultType); 9802 } 9803 case Type::BlockPointer: 9804 { 9805 // Merge two block pointer types, while trying to preserve typedef info 9806 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 9807 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 9808 if (Unqualified) { 9809 LHSPointee = LHSPointee.getUnqualifiedType(); 9810 RHSPointee = RHSPointee.getUnqualifiedType(); 9811 } 9812 if (getLangOpts().OpenCL) { 9813 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 9814 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 9815 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 9816 // 6.12.5) thus the following check is asymmetric. 
9817 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 9818 return {}; 9819 LHSPteeQual.removeAddressSpace(); 9820 RHSPteeQual.removeAddressSpace(); 9821 LHSPointee = 9822 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 9823 RHSPointee = 9824 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 9825 } 9826 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 9827 Unqualified); 9828 if (ResultType.isNull()) 9829 return {}; 9830 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 9831 return LHS; 9832 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 9833 return RHS; 9834 return getBlockPointerType(ResultType); 9835 } 9836 case Type::Atomic: 9837 { 9838 // Merge two pointer types, while trying to preserve typedef info 9839 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 9840 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 9841 if (Unqualified) { 9842 LHSValue = LHSValue.getUnqualifiedType(); 9843 RHSValue = RHSValue.getUnqualifiedType(); 9844 } 9845 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 9846 Unqualified); 9847 if (ResultType.isNull()) 9848 return {}; 9849 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 9850 return LHS; 9851 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 9852 return RHS; 9853 return getAtomicType(ResultType); 9854 } 9855 case Type::ConstantArray: 9856 { 9857 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 9858 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 9859 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 9860 return {}; 9861 9862 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 9863 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 9864 if (Unqualified) { 9865 LHSElem = LHSElem.getUnqualifiedType(); 9866 RHSElem = RHSElem.getUnqualifiedType(); 9867 } 9868 9869 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 9870 if (ResultType.isNull()) 9871 return {}; 9872 9873 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 9874 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 9875 9876 // If either side is a variable array, and both are complete, check whether 9877 // the current dimension is definite. 
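    // E.g. merging 'int[10]' with 'int[n]' keeps the constant-size form, while
    // 'int[10]' and 'int[20]' have definite but unequal sizes and do not merge.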
9878 if (LVAT || RVAT) { 9879 auto SizeFetch = [this](const VariableArrayType* VAT, 9880 const ConstantArrayType* CAT) 9881 -> std::pair<bool,llvm::APInt> { 9882 if (VAT) { 9883 Optional<llvm::APSInt> TheInt; 9884 Expr *E = VAT->getSizeExpr(); 9885 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 9886 return std::make_pair(true, *TheInt); 9887 return std::make_pair(false, llvm::APSInt()); 9888 } 9889 if (CAT) 9890 return std::make_pair(true, CAT->getSize()); 9891 return std::make_pair(false, llvm::APInt()); 9892 }; 9893 9894 bool HaveLSize, HaveRSize; 9895 llvm::APInt LSize, RSize; 9896 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 9897 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 9898 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 9899 return {}; // Definite, but unequal, array dimension 9900 } 9901 9902 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 9903 return LHS; 9904 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 9905 return RHS; 9906 if (LCAT) 9907 return getConstantArrayType(ResultType, LCAT->getSize(), 9908 LCAT->getSizeExpr(), 9909 ArrayType::ArraySizeModifier(), 0); 9910 if (RCAT) 9911 return getConstantArrayType(ResultType, RCAT->getSize(), 9912 RCAT->getSizeExpr(), 9913 ArrayType::ArraySizeModifier(), 0); 9914 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 9915 return LHS; 9916 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 9917 return RHS; 9918 if (LVAT) { 9919 // FIXME: This isn't correct! But tricky to implement because 9920 // the array's size has to be the size of LHS, but the type 9921 // has to be different. 9922 return LHS; 9923 } 9924 if (RVAT) { 9925 // FIXME: This isn't correct! But tricky to implement because 9926 // the array's size has to be the size of RHS, but the type 9927 // has to be different. 9928 return RHS; 9929 } 9930 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 9931 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 9932 return getIncompleteArrayType(ResultType, 9933 ArrayType::ArraySizeModifier(), 0); 9934 } 9935 case Type::FunctionNoProto: 9936 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); 9937 case Type::Record: 9938 case Type::Enum: 9939 return {}; 9940 case Type::Builtin: 9941 // Only exactly equal builtin types are compatible, which is tested above. 9942 return {}; 9943 case Type::Complex: 9944 // Distinct complex types are incompatible. 9945 return {}; 9946 case Type::Vector: 9947 // FIXME: The merged type should be an ExtVector! 9948 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 9949 RHSCan->castAs<VectorType>())) 9950 return LHS; 9951 return {}; 9952 case Type::ConstantMatrix: 9953 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 9954 RHSCan->castAs<ConstantMatrixType>())) 9955 return LHS; 9956 return {}; 9957 case Type::ObjCObject: { 9958 // Check if the types are assignment compatible. 9959 // FIXME: This should be type compatibility, e.g. whether 9960 // "LHS x; RHS x;" at global scope is legal. 
    if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(),
                                RHS->castAs<ObjCObjectType>()))
      return LHS;
    return {};
  }
  case Type::ObjCObjectPointer:
    if (OfBlockPointer) {
      if (canAssignObjCInterfacesInBlockPointer(
              LHS->castAs<ObjCObjectPointerType>(),
              RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
        return LHS;
      return {};
    }
    if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(),
                                RHS->castAs<ObjCObjectPointerType>()))
      return LHS;
    return {};
  case Type::Pipe:
    assert(LHS != RHS &&
           "Equivalent pipe types should have already been handled!");
    return {};
  case Type::ExtInt: {
    // Merge two ext-int types, while trying to preserve typedef info.
    bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned();
    bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned();
    unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits();
    unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits();

    // Like unsigned/int, shouldn't have a type if they don't match.
    if (LHSUnsigned != RHSUnsigned)
      return {};

    if (LHSBits != RHSBits)
      return {};
    return LHS;
  }
  }

  llvm_unreachable("Invalid Type::Class!");
}

bool ASTContext::mergeExtParameterInfo(
    const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
    bool &CanUseFirst, bool &CanUseSecond,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
  assert(NewParamInfos.empty() && "param info list not empty");
  CanUseFirst = CanUseSecond = true;
  bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
  bool SecondHasInfo = SecondFnType->hasExtParameterInfos();

  // Fast path: if the first type doesn't have ext parameter infos,
  // we match if and only if the second type also doesn't have them.
  if (!FirstHasInfo && !SecondHasInfo)
    return true;

  bool NeedParamInfo = false;
  size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
                          : SecondFnType->getExtParameterInfos().size();

  for (size_t I = 0; I < E; ++I) {
    FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
    if (FirstHasInfo)
      FirstParam = FirstFnType->getExtParameterInfo(I);
    if (SecondHasInfo)
      SecondParam = SecondFnType->getExtParameterInfo(I);

    // Cannot merge unless everything except the noescape flag matches.
    if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false))
      return false;

    bool FirstNoEscape = FirstParam.isNoEscape();
    bool SecondNoEscape = SecondParam.isNoEscape();
    bool IsNoEscape = FirstNoEscape && SecondNoEscape;
    NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape));
    if (NewParamInfos.back().getOpaqueValue())
      NeedParamInfo = true;
    if (FirstNoEscape != IsNoEscape)
      CanUseFirst = false;
    if (SecondNoEscape != IsNoEscape)
      CanUseSecond = false;
  }

  if (!NeedParamInfo)
    NewParamInfos.clear();

  return true;
}

void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) {
  ObjCLayouts[CD] = nullptr;
}

/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
/// 'RHS' attributes and returns the merged version; including for function
/// return types.
10056 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10057 QualType LHSCan = getCanonicalType(LHS), 10058 RHSCan = getCanonicalType(RHS); 10059 // If two types are identical, they are compatible. 10060 if (LHSCan == RHSCan) 10061 return LHS; 10062 if (RHSCan->isFunctionType()) { 10063 if (!LHSCan->isFunctionType()) 10064 return {}; 10065 QualType OldReturnType = 10066 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10067 QualType NewReturnType = 10068 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10069 QualType ResReturnType = 10070 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10071 if (ResReturnType.isNull()) 10072 return {}; 10073 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10074 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10075 // In either case, use OldReturnType to build the new function type. 10076 const auto *F = LHS->castAs<FunctionType>(); 10077 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10078 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10079 EPI.ExtInfo = getFunctionExtInfo(LHS); 10080 QualType ResultType = 10081 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10082 return ResultType; 10083 } 10084 } 10085 return {}; 10086 } 10087 10088 // If the qualifiers are different, the types can still be merged. 10089 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10090 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10091 if (LQuals != RQuals) { 10092 // If any of these qualifiers are different, we have a type mismatch. 10093 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10094 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10095 return {}; 10096 10097 // Exactly one GC qualifier difference is allowed: __strong is 10098 // okay if the other type has no GC qualifier but is an Objective 10099 // C object pointer (i.e. implicitly strong by default). We fix 10100 // this by pretending that the unqualified type was actually 10101 // qualified __strong. 
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong)
      return LHS;
    if (GC_R == Qualifiers::Strong)
      return RHS;
    return {};
  }

  if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
    QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
    if (ResQT == LHSBaseQT)
      return LHS;
    if (ResQT == RHSBaseQT)
      return RHS;
  }
  return {};
}

//===----------------------------------------------------------------------===//
// Integer Predicates
//===----------------------------------------------------------------------===//

unsigned ASTContext::getIntWidth(QualType T) const {
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType();
  if (T->isBooleanType())
    return 1;
  if (const auto *EIT = T->getAs<ExtIntType>())
    return EIT->getNumBits();
  // For builtin types, just use the standard type sizing method
  return (unsigned)getTypeSize(T);
}

QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _ExtInt, return an unsigned _ExtInt with the same width.
  if (const auto *EITy = T->getAs<ExtIntType>())
    return getExtIntType(/*IsUnsigned=*/true, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type sign-changing code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    llvm_unreachable("Unexpected signed integer or fixed point type");
  }
}

QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasUnsignedIntegerRepresentation() ||
          T->isUnsignedFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _ExtInt, return a signed _ExtInt with the same width.
  if (const auto *EITy = T->getAs<ExtIntType>())
    return getExtIntType(/*IsUnsigned=*/false, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the general
  // integer type sign-changing code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
10246 case BuiltinType::WChar_U: 10247 return getSignedWCharType(); 10248 10249 case BuiltinType::UShortAccum: 10250 return ShortAccumTy; 10251 case BuiltinType::UAccum: 10252 return AccumTy; 10253 case BuiltinType::ULongAccum: 10254 return LongAccumTy; 10255 case BuiltinType::SatUShortAccum: 10256 return SatShortAccumTy; 10257 case BuiltinType::SatUAccum: 10258 return SatAccumTy; 10259 case BuiltinType::SatULongAccum: 10260 return SatLongAccumTy; 10261 case BuiltinType::UShortFract: 10262 return ShortFractTy; 10263 case BuiltinType::UFract: 10264 return FractTy; 10265 case BuiltinType::ULongFract: 10266 return LongFractTy; 10267 case BuiltinType::SatUShortFract: 10268 return SatShortFractTy; 10269 case BuiltinType::SatUFract: 10270 return SatFractTy; 10271 case BuiltinType::SatULongFract: 10272 return SatLongFractTy; 10273 default: 10274 llvm_unreachable("Unexpected unsigned integer or fixed point type"); 10275 } 10276 } 10277 10278 ASTMutationListener::~ASTMutationListener() = default; 10279 10280 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 10281 QualType ReturnType) {} 10282 10283 //===----------------------------------------------------------------------===// 10284 // Builtin Type Computation 10285 //===----------------------------------------------------------------------===// 10286 10287 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 10288 /// pointer over the consumed characters. This returns the resultant type. If 10289 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 10290 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 10291 /// a vector of "i*". 10292 /// 10293 /// RequiresICE is filled in on return to indicate whether the value is required 10294 /// to be an Integer Constant Expression. 10295 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 10296 ASTContext::GetBuiltinTypeError &Error, 10297 bool &RequiresICE, 10298 bool AllowTypeModifiers) { 10299 // Modifiers. 10300 int HowLong = 0; 10301 bool Signed = false, Unsigned = false; 10302 RequiresICE = false; 10303 10304 // Read the prefixed modifiers first. 10305 bool Done = false; 10306 #ifndef NDEBUG 10307 bool IsSpecial = false; 10308 #endif 10309 while (!Done) { 10310 switch (*Str++) { 10311 default: Done = true; --Str; break; 10312 case 'I': 10313 RequiresICE = true; 10314 break; 10315 case 'S': 10316 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 10317 assert(!Signed && "Can't use 'S' modifier multiple times!"); 10318 Signed = true; 10319 break; 10320 case 'U': 10321 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 10322 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 10323 Unsigned = true; 10324 break; 10325 case 'L': 10326 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 10327 assert(HowLong <= 2 && "Can't have LLLL modifier"); 10328 ++HowLong; 10329 break; 10330 case 'N': 10331 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 10332 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10333 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 10334 #ifndef NDEBUG 10335 IsSpecial = true; 10336 #endif 10337 if (Context.getTargetInfo().getLongWidth() == 32) 10338 ++HowLong; 10339 break; 10340 case 'W': 10341 // This modifier represents int64 type. 
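      // E.g. it typically selects 'long' on LP64 targets and 'long long' on
      // LLP64 targets such as 64-bit Windows.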
10342 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10343 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 10344 #ifndef NDEBUG 10345 IsSpecial = true; 10346 #endif 10347 switch (Context.getTargetInfo().getInt64Type()) { 10348 default: 10349 llvm_unreachable("Unexpected integer type"); 10350 case TargetInfo::SignedLong: 10351 HowLong = 1; 10352 break; 10353 case TargetInfo::SignedLongLong: 10354 HowLong = 2; 10355 break; 10356 } 10357 break; 10358 case 'Z': 10359 // This modifier represents int32 type. 10360 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10361 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 10362 #ifndef NDEBUG 10363 IsSpecial = true; 10364 #endif 10365 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 10366 default: 10367 llvm_unreachable("Unexpected integer type"); 10368 case TargetInfo::SignedInt: 10369 HowLong = 0; 10370 break; 10371 case TargetInfo::SignedLong: 10372 HowLong = 1; 10373 break; 10374 case TargetInfo::SignedLongLong: 10375 HowLong = 2; 10376 break; 10377 } 10378 break; 10379 case 'O': 10380 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10381 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 10382 #ifndef NDEBUG 10383 IsSpecial = true; 10384 #endif 10385 if (Context.getLangOpts().OpenCL) 10386 HowLong = 1; 10387 else 10388 HowLong = 2; 10389 break; 10390 } 10391 } 10392 10393 QualType Type; 10394 10395 // Read the base type. 10396 switch (*Str++) { 10397 default: llvm_unreachable("Unknown builtin type letter!"); 10398 case 'x': 10399 assert(HowLong == 0 && !Signed && !Unsigned && 10400 "Bad modifiers used with 'x'!"); 10401 Type = Context.Float16Ty; 10402 break; 10403 case 'y': 10404 assert(HowLong == 0 && !Signed && !Unsigned && 10405 "Bad modifiers used with 'y'!"); 10406 Type = Context.BFloat16Ty; 10407 break; 10408 case 'v': 10409 assert(HowLong == 0 && !Signed && !Unsigned && 10410 "Bad modifiers used with 'v'!"); 10411 Type = Context.VoidTy; 10412 break; 10413 case 'h': 10414 assert(HowLong == 0 && !Signed && !Unsigned && 10415 "Bad modifiers used with 'h'!"); 10416 Type = Context.HalfTy; 10417 break; 10418 case 'f': 10419 assert(HowLong == 0 && !Signed && !Unsigned && 10420 "Bad modifiers used with 'f'!"); 10421 Type = Context.FloatTy; 10422 break; 10423 case 'd': 10424 assert(HowLong < 3 && !Signed && !Unsigned && 10425 "Bad modifiers used with 'd'!"); 10426 if (HowLong == 1) 10427 Type = Context.LongDoubleTy; 10428 else if (HowLong == 2) 10429 Type = Context.Float128Ty; 10430 else 10431 Type = Context.DoubleTy; 10432 break; 10433 case 's': 10434 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 10435 if (Unsigned) 10436 Type = Context.UnsignedShortTy; 10437 else 10438 Type = Context.ShortTy; 10439 break; 10440 case 'i': 10441 if (HowLong == 3) 10442 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 10443 else if (HowLong == 2) 10444 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 10445 else if (HowLong == 1) 10446 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 10447 else 10448 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 10449 break; 10450 case 'c': 10451 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 10452 if (Signed) 10453 Type = Context.SignedCharTy; 10454 else if (Unsigned) 10455 Type = Context.UnsignedCharTy; 10456 else 10457 Type = Context.CharTy; 10458 break; 10459 case 'b': // boolean 10460 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 10461 Type = Context.BoolTy; 10462 break; 10463 case 'z': // size_t. 10464 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 10465 Type = Context.getSizeType(); 10466 break; 10467 case 'w': // wchar_t. 10468 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 10469 Type = Context.getWideCharType(); 10470 break; 10471 case 'F': 10472 Type = Context.getCFConstantStringType(); 10473 break; 10474 case 'G': 10475 Type = Context.getObjCIdType(); 10476 break; 10477 case 'H': 10478 Type = Context.getObjCSelType(); 10479 break; 10480 case 'M': 10481 Type = Context.getObjCSuperType(); 10482 break; 10483 case 'a': 10484 Type = Context.getBuiltinVaListType(); 10485 assert(!Type.isNull() && "builtin va list type not initialized!"); 10486 break; 10487 case 'A': 10488 // This is a "reference" to a va_list; however, what exactly 10489 // this means depends on how va_list is defined. There are two 10490 // different kinds of va_list: ones passed by value, and ones 10491 // passed by reference. An example of a by-value va_list is 10492 // x86, where va_list is a char*. An example of by-ref va_list 10493 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 10494 // we want this argument to be a char*&; for x86-64, we want 10495 // it to be a __va_list_tag*. 10496 Type = Context.getBuiltinVaListType(); 10497 assert(!Type.isNull() && "builtin va list type not initialized!"); 10498 if (Type->isArrayType()) 10499 Type = Context.getArrayDecayedType(Type); 10500 else 10501 Type = Context.getLValueReferenceType(Type); 10502 break; 10503 case 'q': { 10504 char *End; 10505 unsigned NumElements = strtoul(Str, &End, 10); 10506 assert(End != Str && "Missing vector size"); 10507 Str = End; 10508 10509 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 10510 RequiresICE, false); 10511 assert(!RequiresICE && "Can't require vector ICE"); 10512 10513 Type = Context.getScalableVectorType(ElementType, NumElements); 10514 break; 10515 } 10516 case 'V': { 10517 char *End; 10518 unsigned NumElements = strtoul(Str, &End, 10); 10519 assert(End != Str && "Missing vector size"); 10520 Str = End; 10521 10522 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 10523 RequiresICE, false); 10524 assert(!RequiresICE && "Can't require vector ICE"); 10525 10526 // TODO: No way to make AltiVec vectors in builtins yet. 
    Type = Context.getVectorType(ElementType, NumElements,
                                 VectorType::GenericVector);
    break;
  }
  case 'E': {
    char *End;

    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");

    Str = End;

    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    Type = Context.getExtVectorType(ElementType, NumElements);
    break;
  }
  case 'X': {
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(ElementType);
    break;
  }
  case 'Y':
    Type = Context.getPointerDiffType();
    break;
  case 'P':
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return {};
    }
    break;
  case 'J':
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return {};
    }
    break;
  case 'K':
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();

    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return {};
    }
    break;
  case 'p':
    Type = Context.getProcessIDType();
    break;
  }

  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break;
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(Str, &End, 10);
      if (End != Str) {
        // Note AddrSpace == 0 is not the same as an unspecified address space.
        Type = Context.getAddrSpaceQualType(
            Type,
            Context.getLangASForBuiltinAddressSpace(AddrSpace));
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(Type);
      else
        Type = Context.getLValueReferenceType(Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C':
      Type = Type.withConst();
      break;
    case 'D':
      Type = Context.getVolatileType(Type);
      break;
    case 'R':
      Type = Type.withRestrict();
      break;
    }
  }

  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");

  return Type;
}

// On some targets such as PowerPC, some of the builtins are defined with custom
// type descriptors for target-dependent types. These descriptors are decoded in
// other functions, but it may be useful to be able to fall back to default
// descriptor decoding to define builtins mixing target-dependent and target-
// independent types. This function allows decoding one type descriptor with
// default decoding.
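// For instance, a builtin signature string such as "ii*" (as used in
// Builtins.def) describes a builtin returning 'int' that takes an 'int *'
// parameter; prefix modifiers like 'U' (unsigned) and 'L' (long) adjust the
// base type that follows them.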
10635 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 10636 GetBuiltinTypeError &Error, bool &RequireICE, 10637 bool AllowTypeModifiers) const { 10638 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 10639 } 10640 10641 /// GetBuiltinType - Return the type for the specified builtin. 10642 QualType ASTContext::GetBuiltinType(unsigned Id, 10643 GetBuiltinTypeError &Error, 10644 unsigned *IntegerConstantArgs) const { 10645 const char *TypeStr = BuiltinInfo.getTypeString(Id); 10646 if (TypeStr[0] == '\0') { 10647 Error = GE_Missing_type; 10648 return {}; 10649 } 10650 10651 SmallVector<QualType, 8> ArgTypes; 10652 10653 bool RequiresICE = false; 10654 Error = GE_None; 10655 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 10656 RequiresICE, true); 10657 if (Error != GE_None) 10658 return {}; 10659 10660 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 10661 10662 while (TypeStr[0] && TypeStr[0] != '.') { 10663 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 10664 if (Error != GE_None) 10665 return {}; 10666 10667 // If this argument is required to be an IntegerConstantExpression and the 10668 // caller cares, fill in the bitmask we return. 10669 if (RequiresICE && IntegerConstantArgs) 10670 *IntegerConstantArgs |= 1 << ArgTypes.size(); 10671 10672 // Do array -> pointer decay. The builtin should use the decayed type. 10673 if (Ty->isArrayType()) 10674 Ty = getArrayDecayedType(Ty); 10675 10676 ArgTypes.push_back(Ty); 10677 } 10678 10679 if (Id == Builtin::BI__GetExceptionInfo) 10680 return {}; 10681 10682 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 10683 "'.' should only occur at end of builtin type list!"); 10684 10685 bool Variadic = (TypeStr[0] == '.'); 10686 10687 FunctionType::ExtInfo EI(getDefaultCallingConvention( 10688 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 10689 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 10690 10691 10692 // We really shouldn't be making a no-proto type here. 10693 if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus) 10694 return getFunctionNoProtoType(ResType, EI); 10695 10696 FunctionProtoType::ExtProtoInfo EPI; 10697 EPI.ExtInfo = EI; 10698 EPI.Variadic = Variadic; 10699 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 10700 EPI.ExceptionSpec.Type = 10701 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 10702 10703 return getFunctionType(ResType, ArgTypes, EPI); 10704 } 10705 10706 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 10707 const FunctionDecl *FD) { 10708 if (!FD->isExternallyVisible()) 10709 return GVA_Internal; 10710 10711 // Non-user-provided functions get emitted as weak definitions with every 10712 // use, no matter whether they've been explicitly instantiated etc. 
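  // E.g. an implicitly-defined copy constructor or a defaulted destructor is
  // treated as discardable (typically linkonce_odr) at every use site.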
10713 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) 10714 if (!MD->isUserProvided()) 10715 return GVA_DiscardableODR; 10716 10717 GVALinkage External; 10718 switch (FD->getTemplateSpecializationKind()) { 10719 case TSK_Undeclared: 10720 case TSK_ExplicitSpecialization: 10721 External = GVA_StrongExternal; 10722 break; 10723 10724 case TSK_ExplicitInstantiationDefinition: 10725 return GVA_StrongODR; 10726 10727 // C++11 [temp.explicit]p10: 10728 // [ Note: The intent is that an inline function that is the subject of 10729 // an explicit instantiation declaration will still be implicitly 10730 // instantiated when used so that the body can be considered for 10731 // inlining, but that no out-of-line copy of the inline function would be 10732 // generated in the translation unit. -- end note ] 10733 case TSK_ExplicitInstantiationDeclaration: 10734 return GVA_AvailableExternally; 10735 10736 case TSK_ImplicitInstantiation: 10737 External = GVA_DiscardableODR; 10738 break; 10739 } 10740 10741 if (!FD->isInlined()) 10742 return External; 10743 10744 if ((!Context.getLangOpts().CPlusPlus && 10745 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 10746 !FD->hasAttr<DLLExportAttr>()) || 10747 FD->hasAttr<GNUInlineAttr>()) { 10748 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 10749 10750 // GNU or C99 inline semantics. Determine whether this symbol should be 10751 // externally visible. 10752 if (FD->isInlineDefinitionExternallyVisible()) 10753 return External; 10754 10755 // C99 inline semantics, where the symbol is not externally visible. 10756 return GVA_AvailableExternally; 10757 } 10758 10759 // Functions specified with extern and inline in -fms-compatibility mode 10760 // forcibly get emitted. While the body of the function cannot be later 10761 // replaced, the function definition cannot be discarded. 10762 if (FD->isMSExternInline()) 10763 return GVA_StrongODR; 10764 10765 return GVA_DiscardableODR; 10766 } 10767 10768 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 10769 const Decl *D, GVALinkage L) { 10770 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 10771 // dllexport/dllimport on inline functions. 10772 if (D->hasAttr<DLLImportAttr>()) { 10773 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 10774 return GVA_AvailableExternally; 10775 } else if (D->hasAttr<DLLExportAttr>()) { 10776 if (L == GVA_DiscardableODR) 10777 return GVA_StrongODR; 10778 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 10779 // Device-side functions with __global__ attribute must always be 10780 // visible externally so they can be launched from host. 10781 if (D->hasAttr<CUDAGlobalAttr>() && 10782 (L == GVA_DiscardableODR || L == GVA_Internal)) 10783 return GVA_StrongODR; 10784 // Single source offloading languages like CUDA/HIP need to be able to 10785 // access static device variables from host code of the same compilation 10786 // unit. This is done by externalizing the static variable with a shared 10787 // name between the host and device compilation which is the same for the 10788 // same compilation unit whereas different among different compilation 10789 // units. 10790 if (Context.shouldExternalizeStaticVar(D)) 10791 return GVA_StrongExternal; 10792 } 10793 return L; 10794 } 10795 10796 /// Adjust the GVALinkage for a declaration based on what an external AST source 10797 /// knows about whether there can be other definitions of this declaration. 
10798 static GVALinkage 10799 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 10800 GVALinkage L) { 10801 ExternalASTSource *Source = Ctx.getExternalSource(); 10802 if (!Source) 10803 return L; 10804 10805 switch (Source->hasExternalDefinitions(D)) { 10806 case ExternalASTSource::EK_Never: 10807 // Other translation units rely on us to provide the definition. 10808 if (L == GVA_DiscardableODR) 10809 return GVA_StrongODR; 10810 break; 10811 10812 case ExternalASTSource::EK_Always: 10813 return GVA_AvailableExternally; 10814 10815 case ExternalASTSource::EK_ReplyHazy: 10816 break; 10817 } 10818 return L; 10819 } 10820 10821 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 10822 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 10823 adjustGVALinkageForAttributes(*this, FD, 10824 basicGVALinkageForFunction(*this, FD))); 10825 } 10826 10827 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 10828 const VarDecl *VD) { 10829 if (!VD->isExternallyVisible()) 10830 return GVA_Internal; 10831 10832 if (VD->isStaticLocal()) { 10833 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 10834 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 10835 LexicalContext = LexicalContext->getLexicalParent(); 10836 10837 // ObjC Blocks can create local variables that don't have a FunctionDecl 10838 // LexicalContext. 10839 if (!LexicalContext) 10840 return GVA_DiscardableODR; 10841 10842 // Otherwise, let the static local variable inherit its linkage from the 10843 // nearest enclosing function. 10844 auto StaticLocalLinkage = 10845 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 10846 10847 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 10848 // be emitted in any object with references to the symbol for the object it 10849 // contains, whether inline or out-of-line." 10850 // Similar behavior is observed with MSVC. An alternative ABI could use 10851 // StrongODR/AvailableExternally to match the function, but none are 10852 // known/supported currently. 10853 if (StaticLocalLinkage == GVA_StrongODR || 10854 StaticLocalLinkage == GVA_AvailableExternally) 10855 return GVA_DiscardableODR; 10856 return StaticLocalLinkage; 10857 } 10858 10859 // MSVC treats in-class initialized static data members as definitions. 10860 // By giving them non-strong linkage, out-of-line definitions won't 10861 // cause link errors. 10862 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 10863 return GVA_DiscardableODR; 10864 10865 // Most non-template variables have strong linkage; inline variables are 10866 // linkonce_odr or (occasionally, for compatibility) weak_odr. 10867 GVALinkage StrongLinkage; 10868 switch (Context.getInlineVariableDefinitionKind(VD)) { 10869 case ASTContext::InlineVariableDefinitionKind::None: 10870 StrongLinkage = GVA_StrongExternal; 10871 break; 10872 case ASTContext::InlineVariableDefinitionKind::Weak: 10873 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 10874 StrongLinkage = GVA_DiscardableODR; 10875 break; 10876 case ASTContext::InlineVariableDefinitionKind::Strong: 10877 StrongLinkage = GVA_StrongODR; 10878 break; 10879 } 10880 10881 switch (VD->getTemplateSpecializationKind()) { 10882 case TSK_Undeclared: 10883 return StrongLinkage; 10884 10885 case TSK_ExplicitSpecialization: 10886 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 10887 VD->isStaticDataMember() 10888 ? 
GVA_StrongODR 10889 : StrongLinkage; 10890 10891 case TSK_ExplicitInstantiationDefinition: 10892 return GVA_StrongODR; 10893 10894 case TSK_ExplicitInstantiationDeclaration: 10895 return GVA_AvailableExternally; 10896 10897 case TSK_ImplicitInstantiation: 10898 return GVA_DiscardableODR; 10899 } 10900 10901 llvm_unreachable("Invalid Linkage!"); 10902 } 10903 10904 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { 10905 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 10906 adjustGVALinkageForAttributes(*this, VD, 10907 basicGVALinkageForVariable(*this, VD))); 10908 } 10909 10910 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 10911 if (const auto *VD = dyn_cast<VarDecl>(D)) { 10912 if (!VD->isFileVarDecl()) 10913 return false; 10914 // Global named register variables (GNU extension) are never emitted. 10915 if (VD->getStorageClass() == SC_Register) 10916 return false; 10917 if (VD->getDescribedVarTemplate() || 10918 isa<VarTemplatePartialSpecializationDecl>(VD)) 10919 return false; 10920 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 10921 // We never need to emit an uninstantiated function template. 10922 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 10923 return false; 10924 } else if (isa<PragmaCommentDecl>(D)) 10925 return true; 10926 else if (isa<PragmaDetectMismatchDecl>(D)) 10927 return true; 10928 else if (isa<OMPRequiresDecl>(D)) 10929 return true; 10930 else if (isa<OMPThreadPrivateDecl>(D)) 10931 return !D->getDeclContext()->isDependentContext(); 10932 else if (isa<OMPAllocateDecl>(D)) 10933 return !D->getDeclContext()->isDependentContext(); 10934 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 10935 return !D->getDeclContext()->isDependentContext(); 10936 else if (isa<ImportDecl>(D)) 10937 return true; 10938 else 10939 return false; 10940 10941 // If this is a member of a class template, we do not need to emit it. 10942 if (D->getDeclContext()->isDependentContext()) 10943 return false; 10944 10945 // Weak references don't produce any output by themselves. 10946 if (D->hasAttr<WeakRefAttr>()) 10947 return false; 10948 10949 // Aliases and used decls are required. 10950 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 10951 return true; 10952 10953 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 10954 // Forward declarations aren't required. 10955 if (!FD->doesThisDeclarationHaveABody()) 10956 return FD->doesDeclarationForceExternallyVisibleDefinition(); 10957 10958 // Constructors and destructors are required. 10959 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 10960 return true; 10961 10962 // The key function for a class is required. This rule only comes 10963 // into play when inline functions can be key functions, though. 10964 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 10965 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 10966 const CXXRecordDecl *RD = MD->getParent(); 10967 if (MD->isOutOfLine() && RD->isDynamicClass()) { 10968 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 10969 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 10970 return true; 10971 } 10972 } 10973 } 10974 10975 GVALinkage Linkage = GetGVALinkageForFunction(FD); 10976 10977 // static, static inline, always_inline, and extern inline functions can 10978 // always be deferred. Normal inline functions can be deferred in C99/C++. 10979 // Implicit template instantiations can also be deferred in C++. 
10980 return !isDiscardableGVALinkage(Linkage); 10981 } 10982 10983 const auto *VD = cast<VarDecl>(D); 10984 assert(VD->isFileVarDecl() && "Expected file scoped var"); 10985 10986 // If the decl is marked as `declare target to`, it should be emitted for the 10987 // host and for the device. 10988 if (LangOpts.OpenMP && 10989 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 10990 return true; 10991 10992 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 10993 !isMSStaticDataMemberInlineDefinition(VD)) 10994 return false; 10995 10996 // Variables that can be needed in other TUs are required. 10997 auto Linkage = GetGVALinkageForVariable(VD); 10998 if (!isDiscardableGVALinkage(Linkage)) 10999 return true; 11000 11001 // We never need to emit a variable that is available in another TU. 11002 if (Linkage == GVA_AvailableExternally) 11003 return false; 11004 11005 // Variables that have destruction with side-effects are required. 11006 if (VD->needsDestruction(*this)) 11007 return true; 11008 11009 // Variables that have initialization with side-effects are required. 11010 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11011 // We can get a value-dependent initializer during error recovery. 11012 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11013 return true; 11014 11015 // Likewise, variables with tuple-like bindings are required if their 11016 // bindings have side-effects. 11017 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11018 for (const auto *BD : DD->bindings()) 11019 if (const auto *BindingVD = BD->getHoldingVar()) 11020 if (DeclMustBeEmitted(BindingVD)) 11021 return true; 11022 11023 return false; 11024 } 11025 11026 void ASTContext::forEachMultiversionedFunctionVersion( 11027 const FunctionDecl *FD, 11028 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11029 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11030 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11031 FD = FD->getMostRecentDecl(); 11032 // FIXME: The order of traversal here matters and depends on the order of 11033 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11034 // shouldn't rely on that. 11035 for (auto *CurDecl : 11036 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11037 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11038 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11039 std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) { 11040 SeenDecls.insert(CurFD); 11041 Pred(CurFD); 11042 } 11043 } 11044 } 11045 11046 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11047 bool IsCXXMethod, 11048 bool IsBuiltin) const { 11049 // Pass through to the C++ ABI object 11050 if (IsCXXMethod) 11051 return ABI->getDefaultMethodCallConv(IsVariadic); 11052 11053 // Builtins ignore user-specified default calling convention and remain the 11054 // Target's default calling convention. 11055 if (!IsBuiltin) { 11056 switch (LangOpts.getDefaultCallingConv()) { 11057 case LangOptions::DCC_None: 11058 break; 11059 case LangOptions::DCC_CDecl: 11060 return CC_C; 11061 case LangOptions::DCC_FastCall: 11062 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11063 return CC_X86FastCall; 11064 break; 11065 case LangOptions::DCC_StdCall: 11066 if (!IsVariadic) 11067 return CC_X86StdCall; 11068 break; 11069 case LangOptions::DCC_VectorCall: 11070 // __vectorcall cannot be applied to variadic functions. 
      if (!IsVariadic)
        return CC_X86VectorCall;
      break;
    case LangOptions::DCC_RegCall:
      // __regcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86RegCall;
      break;
    }
  }
  return Target->getDefaultCallingConv();
}

bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}

VTableContextBase *ASTContext::getVTableContext() {
  if (!VTContext.get()) {
    auto ABI = Target->getCXXABI();
    if (ABI.isMicrosoft())
      VTContext.reset(new MicrosoftVTableContext(*this));
    else {
      auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
                                 ? ItaniumVTableContext::Relative
                                 : ItaniumVTableContext::Pointer;
      VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
    }
  }
  return VTContext.get();
}

MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        *this, getDiagnostics(),
        [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
            return RD->getDeviceLambdaManglingNumber();
          return llvm::None;
        });
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

CXXABI::~CXXABI() = default;

size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes);
}

/// getIntTypeForBitwidth -
/// Returns the integer type of the specified bit width and signedness.
/// Returns an empty QualType if the target has no appropriate type.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
  if (!QualTy && DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy;
}

/// getRealTypeForBitwidth -
/// Returns the floating-point type of the specified bit width.
/// Returns an empty QualType if the target has no appropriate type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            bool ExplicitIEEE) const {
  TargetInfo::RealType Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE);
  switch (Ty) {
  case TargetInfo::Float:
    return FloatTy;
  case TargetInfo::Double:
    return DoubleTy;
  case TargetInfo::LongDouble:
    return LongDoubleTy;
  case TargetInfo::Float128:
    return Float128Ty;
  case TargetInfo::NoFloat:
    return {};
  }

  llvm_unreachable("Unhandled TargetInfo::RealType value");
}

void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number > 1)
    MangleNumbers[ND] = Number;
}

unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
  auto I = MangleNumbers.find(ND);
  return I != MangleNumbers.end() ? I->second : 1;
}

void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number > 1)
    StaticLocalNumbers[VD] = Number;
}

unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
  return I != StaticLocalNumbers.end() ? I->second : 1;
}
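
// A minimal usage sketch of the mangling-number bookkeeping above (the
// ASTContext reference `Ctx` and the declaration `ND` are assumed to come
// from a caller; they are not defined in this file). Only numbers greater
// than 1 are recorded, and the getters fall back to 1 for everything else:
//
//   Ctx.setManglingNumber(ND, 1);   // not stored; 1 is the implicit default
//   Ctx.getManglingNumber(ND);      // yields 1
//   Ctx.setManglingNumber(ND, 3);   // stored in MangleNumbers
//   Ctx.getManglingNumber(ND);      // yields 3
//
// setStaticLocalNumber/getStaticLocalNumber follow the same convention for
// static local variables.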

MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}

const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}

void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                       CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}

void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}

QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArrayType::Normal, /*IndexTypeQuals*/ 0);
}

StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        *this, Key, StringLiteral::Ascii,
        /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
        SourceLocation());
  return Result;
}

MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
  MSGuidDecls.InsertNode(New, InsertPos);
  return New;
}

TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
  TemplateParamObjectDecls.InsertNode(New, InsertPos);
  return New;
}

bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}
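
// A rough illustration of what trips the check above (the struct and the
// 64-bit MaxAtomicInlineWidth are assumptions for the sake of the example,
// not values taken from this file). On the old Darwin versions handled above,
// source such as
//
//   struct Pair { long a, b; };   // 16 bytes, 8-byte alignment
//   _Atomic struct Pair P;
//   __c11_atomic_load(&P, __ATOMIC_SEQ_CST);
//
// yields Size (16) != Align (8) and 128 bits > MaxInlineWidthInBits, so the
// atomic operation would be lowered to a libcall that those systems do not
// provide and this function would return true.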

bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}

uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
  LangAS AS;
  if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
    AS = LangAS::Default;
  else
    AS = QT->getPointeeType().getAddressSpace();

  return getTargetInfo().getNullPointerValue(AS);
}

unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  if (isTargetAddressSpace(AS))
    return toTargetAddressSpace(AS);
  else
    return (*AddrSpaceMap)[(unsigned)AS];
}

QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
    return SatShortAccumTy;
  case BuiltinType::Accum:
    return SatAccumTy;
  case BuiltinType::LongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::UAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::ULongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return SatShortFractTy;
  case BuiltinType::Fract:
    return SatFractTy;
  case BuiltinType::LongFract:
    return SatLongFractTy;
  case BuiltinType::UShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::UFract:
    return SatUnsignedFractTy;
  case BuiltinType::ULongFract:
    return SatUnsignedLongFractTy;
  }
}

LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
  if (LangOpts.OpenCL)
    return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);

  if (LangOpts.CUDA)
    return getTargetInfo().getCUDABuiltinAddressSpace(AS);

  return getLangASFromTargetAS(AS);
}

// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
    const clang::ASTContext &Ctx, Decl *Value);

unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const
      TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}

unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}

llvm::FixedPointSemantics
ASTContext::getFixedPointSemantics(QualType Ty) const {
  assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
         "Can only get the fixed point semantics for a "
         "fixed point or integer type.");
  if (Ty->isIntegerType())
    return llvm::FixedPointSemantics::GetIntegerSemantics(
        getIntWidth(Ty), Ty->isSignedIntegerType());

  bool isSigned = Ty->isSignedFixedPointType();
  return llvm::FixedPointSemantics(
      static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
      Ty->isSaturatedFixedPointType(),
      !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
}

llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
}

llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
}

QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}

ParsedTargetAttr
ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
  assert(TD != nullptr);
  ParsedTargetAttr ParsedAttr = TD->parse();

  ParsedAttr.Features.erase(
      llvm::remove_if(ParsedAttr.Features,
                      [&](const std::string &Feat) {
                        return !Target->isValidFeatureName(
                            StringRef{Feat}.substr(1));
                      }),
      ParsedAttr.Features.end());
  return ParsedAttr;
}

void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       const FunctionDecl *FD) const {
  if (FD)
    getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
  else
    Target->initFeatureMap(FeatureMap, getDiagnostics(),
                           Target->getTargetOpts().CPU,
                           Target->getTargetOpts().Features);
}
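
// A hedged illustration of the GlobalDecl overload below (the attribute
// string, CPU name, and feature names are examples only, not values taken
// from this file). For a function declared as
//
//   __attribute__((target("arch=skylake,avx2"))) void f();
//
// the overload parses the attribute, drops feature names the target does not
// recognize, switches TargetCPU to "skylake" when that is a valid CPU name,
// and then prepends the command-line features so that "+avx2" from the
// attribute is applied last and takes precedence in the resulting FeatureMap.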

// Fills in the supplied string map with the set of target features for the
// passed-in function.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Prepend the features written on the command line so that the additional
    // features from the function's attribute, which follow them, take
    // precedence.
    ParsedAttr.Features.insert(
        ParsedAttr.Features.begin(),
        Target->getTargetOpts().FeaturesAsWritten.begin(),
        Target->getTargetOpts().FeaturesAsWritten.end());

    if (ParsedAttr.Architecture != "" &&
        Target->isValidCPUName(ParsedAttr.Architecture))
      TargetCPU = ParsedAttr.Architecture;

    // Now populate the feature map, first with the TargetCPU, which is either
    // the default or a new one from the target attribute string. Then we'll
    // use the passed-in features (FeaturesAsWritten) along with the new ones
    // from the attribute.
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
  bool IsStaticVar =
      isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: static managed variables need to be externalized because they
  // are emitted as declarations in the IR and therefore cannot have internal
  // linkage.
  return IsStaticVar &&
         (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar);
}

bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
  return mayExternalizeStaticVar(D) &&
         (D->hasAttr<HIPManagedAttr>() ||
          CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}

StringRef ASTContext::getCUIDHash() const {
  if (!CUIDHash.empty())
    return CUIDHash;
  if (LangOpts.CUID.empty())
    return StringRef();
  CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
  return CUIDHash;
}

// Get the closest named parent so that we can order the SYCL naming decls in
// a context where the mangling is meaningful.
static const DeclContext *GetNamedParent(const CXXRecordDecl *RD) {
  const DeclContext *DC = RD->getDeclContext();

  while (!isa<NamedDecl, TranslationUnitDecl>(DC))
    DC = DC->getParent();
  return DC;
}

void ASTContext::AddSYCLKernelNamingDecl(const CXXRecordDecl *RD) {
  assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
  RD = RD->getCanonicalDecl();
  const DeclContext *DC = GetNamedParent(RD);

  assert(RD->getLocation().isValid() &&
         "Invalid location on kernel naming decl");

  (void)SYCLKernelNamingTypes[DC].insert(RD);
}

bool ASTContext::IsSYCLKernelNamingDecl(const NamedDecl *ND) const {
  assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
  const auto *RD = dyn_cast<CXXRecordDecl>(ND);
  if (!RD)
    return false;
  RD = RD->getCanonicalDecl();
  const DeclContext *DC = GetNamedParent(RD);

  auto Itr = SYCLKernelNamingTypes.find(DC);

  if (Itr == SYCLKernelNamingTypes.end())
    return false;

  return Itr->getSecond().count(RD);
}

// Filters the Decls list to those that share the lambda mangling with the
// passed RD.
void ASTContext::FilterSYCLKernelNamingDecls(
    const CXXRecordDecl *RD,
    llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls) {

  if (!SYCLKernelFilterContext)
    SYCLKernelFilterContext.reset(
        ItaniumMangleContext::create(*this, getDiagnostics()));

  llvm::SmallString<128> LambdaSig;
  llvm::raw_svector_ostream Out(LambdaSig);
  SYCLKernelFilterContext->mangleLambdaSig(RD, Out);

  llvm::erase_if(Decls, [this, &LambdaSig](const CXXRecordDecl *LocalRD) {
    llvm::SmallString<128> LocalLambdaSig;
    llvm::raw_svector_ostream LocalOut(LocalLambdaSig);
    SYCLKernelFilterContext->mangleLambdaSig(LocalRD, LocalOut);
    return LambdaSig != LocalLambdaSig;
  });
}

unsigned ASTContext::GetSYCLKernelNamingIndex(const NamedDecl *ND) {
  assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
  assert(IsSYCLKernelNamingDecl(ND) &&
         "Lambda not involved in mangling asked for a naming index?");

  const CXXRecordDecl *RD = cast<CXXRecordDecl>(ND)->getCanonicalDecl();
  const DeclContext *DC = GetNamedParent(RD);

  auto Itr = SYCLKernelNamingTypes.find(DC);
  assert(Itr != SYCLKernelNamingTypes.end() && "Not a valid DeclContext?");

  const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &Set = Itr->getSecond();

  llvm::SmallVector<const CXXRecordDecl *> Decls{Set.begin(), Set.end()};

  FilterSYCLKernelNamingDecls(RD, Decls);

  llvm::sort(Decls, [](const CXXRecordDecl *LHS, const CXXRecordDecl *RHS) {
    return LHS->getLambdaManglingNumber() < RHS->getLambdaManglingNumber();
  });

  return llvm::find(Decls, RD) - Decls.begin();
}
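
// A rough sketch of what the SYCL kernel-naming helpers above are for (the
// lambdas and the submit() calls are purely illustrative; nothing here names
// a real API from this file). Given
//
//   void host() {
//     submit([] { /* kernel A */ });   // naming index 0
//     submit([] { /* kernel B */ });   // naming index 1
//   }
//
// both lambdas share the named parent "host", so they land in the same
// SYCLKernelNamingTypes bucket; GetSYCLKernelNamingIndex filters that bucket
// to lambdas with the same mangling signature, orders them by lambda
// mangling number, and returns the position of the record asked about.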