//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the actions class which performs semantic analysis and
// builds an AST out of a parse stream.
//
//===----------------------------------------------------------------------===//

#include "UsedDeclVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/TimeProfiler.h"

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
  return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
}

ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }

DarwinSDKInfo *
Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
                                              StringRef Platform) {
  auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
  if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
    Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
        << Platform;
    WarnedDarwinSDKInfoMissing = true;
  }
  return SDKInfo;
}

DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
  if (CachedDarwinSDKInfo)
    return CachedDarwinSDKInfo->get();
  auto SDKInfo = parseDarwinSDKInfo(
      PP.getFileManager().getVirtualFileSystem(),
      PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
  if (SDKInfo && *SDKInfo) {
    CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
    return CachedDarwinSDKInfo->get();
  }
  if (!SDKInfo)
    llvm::consumeError(SDKInfo.takeError());
  CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
  return nullptr;
}

IdentifierInfo *
Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                                 unsigned int Index) {
  std::string InventedName;
  llvm::raw_string_ostream OS(InventedName);

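  // The invented name is "auto:<index+1>" for an unnamed parameter, or
  // "<name>:auto" for a named one.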
  if (!ParamName)
    OS << "auto:" << Index + 1;
  else
    OS << ParamName->getName() << ":auto";

  OS.flush();
  return &Context.Idents.get(OS.str());
}

PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
                                       const Preprocessor &PP) {
  PrintingPolicy Policy = Context.getPrintingPolicy();
  // In diagnostics, we print _Bool as bool if the latter is defined as the
  // former.
  Policy.Bool = Context.getLangOpts().Bool;
  if (!Policy.Bool) {
    if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
      Policy.Bool = BoolMacro->isObjectLike() &&
                    BoolMacro->getNumTokens() == 1 &&
                    BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
    }
  }

  // Shorten the data output if needed
  Policy.EntireContentsOfLargeArray = false;

  return Policy;
}

void Sema::ActOnTranslationUnitScope(Scope *S) {
  TUScope = S;
  PushDeclContext(S, Context.getTranslationUnitDecl());
}

namespace clang {
namespace sema {

class SemaPPCallbacks : public PPCallbacks {
  Sema *S = nullptr;
  llvm::SmallVector<SourceLocation, 8> IncludeStack;

public:
  void set(Sema &S) { this->S = &S; }

  void reset() { S = nullptr; }

  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                   SrcMgr::CharacteristicKind FileType,
                   FileID PrevFID) override {
    if (!S)
      return;
    switch (Reason) {
    case EnterFile: {
      SourceManager &SM = S->getSourceManager();
      SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
      if (IncludeLoc.isValid()) {
        if (llvm::timeTraceProfilerEnabled()) {
          const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc));
          llvm::timeTraceProfilerBegin(
              "Source", FE != nullptr ?
                            FE->getName() : StringRef("<unknown>"));
        }

        IncludeStack.push_back(IncludeLoc);
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
            IncludeLoc);
      }
      break;
    }
    case ExitFile:
      if (!IncludeStack.empty()) {
        if (llvm::timeTraceProfilerEnabled())
          llvm::timeTraceProfilerEnd();

        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
            IncludeStack.pop_back_val());
      }
      break;
    default:
      break;
    }
  }
};

} // end namespace sema
} // end namespace clang

const unsigned Sema::MaxAlignmentExponent;
const uint64_t Sema::MaximumAlignment;

Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : ExternalSource(nullptr), isMultiplexExternalSource(false),
      CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
      Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
      SourceMgr(PP.getSourceManager()), CollectStats(false),
      CodeCompleter(CodeCompleter), CurContext(nullptr),
      OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()),
      CurInitSeg(nullptr), VisContext(nullptr),
      PragmaAttributeCurrentTargetDecl(nullptr),
      IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
      LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
      StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
      StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
      MSVCGuidDecl(nullptr), StdSourceLocationImplDecl(nullptr),
      NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
      StringWithUTF8StringMethod(nullptr),
      ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
      ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
      DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
      TUKind(TUKind), NumSFINAEErrors(0),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      SatisfactionCache(Context), AccessCheckingSFINAE(false),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
      DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
      ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
      CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
  assert(pp.TUKind == TUKind);
  TUScope = nullptr;
  isConstantEvaluatedOverride = false;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    NSAPIObj.reset(new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
      nullptr, ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  InitDataSharingAttributesStack();

  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);

  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}

// Anchor Sema's type info to this TU.
void Sema::anchor() {}

void Sema::addImplicitTypedef(StringRef Name, QualType T) {
  DeclarationName DN = &Context.Idents.get(Name);
  if (IdResolver.begin(DN) == IdResolver.end())
    PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
}

void Sema::Initialize() {
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->InitializeSema(*this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->InitializeSema(*this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }

  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
                        TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
        addImplicitTypedef("queue_t", Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));

      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
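      // The helper below registers the pointer-width-dependent atomic
      // typedefs; it is invoked unconditionally for 32-bit address spaces and,
      // for 64-bit address spaces, only when the 64-bit atomics extensions
      // above are supported.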
      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(Context.getPointerDiffType());
        addImplicitTypedef("atomic_size_t", AtomicSizeT);
        addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
        addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
        addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
      };

      if (Context.getTypeSize(Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
        addImplicitTypedef("atomic_half", AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
                                         getLangOpts()) &&
          getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
                                         getLangOpts())) {
        if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
          addImplicitTypedef("atomic_double", AtomicDoubleT);
          Atomic64BitTypes.push_back(AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
        addImplicitTypedef("atomic_long", AtomicLongT);
        addImplicitTypedef("atomic_ulong", AtomicULongT);

        if (Context.getTypeSize(Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) {                   \
    addImplicitTypedef(#ExtType, Context.Id##Ty);                              \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Context.getTargetInfo().hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
    if (IdResolver.begin(MSVaList) == IdResolver.end())
      PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
  if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}

Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // If Sema's ExternalSource is the multiplexer - we own it.
  if (isMultiplexExternalSource)
    delete ExternalSource;

  // Delete cached satisfactions.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}

void Sema::warnStackExhausted(SourceLocation Loc) {
  // Only warn about this once.
  if (!WarnedStackExhausted) {
    Diag(Loc, diag::warn_stack_exhausted);
    WarnedStackExhausted = true;
  }
}

void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}

/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
                                      UnavailableAttr::ImplicitReason reason) {
  // If we're not in a function, it's an error.
  FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
  if (!fn) return false;

  // If we're in template instantiation, it's an error.
  if (inTemplateInstantiation())
    return false;

  // If that function's not in a system header, it's an error.
  if (!Context.getSourceManager().isInSystemHeader(loc))
    return false;

  // If the function is already unavailable, it's not an error.
  if (fn->hasAttr<UnavailableAttr>()) return true;

  fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
  return true;
}

ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void Sema::addExternalSource(ExternalSemaSource *E) {
  assert(E && "Cannot use with NULL ptr");

  if (!ExternalSource) {
    ExternalSource = E;
    return;
  }

  if (isMultiplexExternalSource)
    static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E);
  else {
    ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
    isMultiplexExternalSource = true;
  }
}

/// Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
  llvm::errs() << "\n*** Semantic Analysis Stats:\n";
  llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";

  BumpAlloc.PrintStats();
  AnalysisWarnings.PrintStats();
}

void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
                                               QualType SrcType,
                                               SourceLocation Loc) {
  Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context);
  if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
                           *ExprNullability != NullabilityKind::NullableResult))
    return;

  Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context);
  if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
    return;

  Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}

void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
  if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
                      E->getBeginLoc()))
    return;
  // nullptr only exists from C++11 on, so don't warn on its absence earlier.
  if (!getLangOpts().CPlusPlus11)
    return;

  if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    return;
  if (E->IgnoreParenImpCasts()->getType()->isNullPtrType())
    return;

  // Don't diagnose the conversion from a 0 literal to a null pointer argument
  // in a synthesized call to operator<=>.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.back().Kind ==
          CodeSynthesisContext::RewritingOperatorAsSpaceship)
    return;

  // If it is a macro from system header, and if the macro name is not "NULL",
  // do not warn.
  SourceLocation MaybeMacroLoc = E->getBeginLoc();
  if (Diags.getSuppressSystemWarnings() &&
      SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
      !findMacroSpelling(MaybeMacroLoc, "NULL"))
    return;

  Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
      << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  if (VK == VK_PRValue && !E->isPRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(
          ("can't implicitly cast glvalue to prvalue with this cast "
           "kind: " +
           std::string(CastExpr::getCastKindName(Kind)))
              .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
      break;
    }
  }
  assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
         "can't cast prvalue to glvalue");
#endif

  diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);

  QualType ExprTy = Context.getCanonicalType(E->getType());
  QualType TypeTy = Context.getCanonicalType(Ty);

  if (ExprTy == TypeTy)
    return E;

  if (Kind == CK_ArrayToPointerDecay) {
    // C++1z [conv.array]: The temporary materialization conversion is applied.
    // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
    if (getLangOpts().CPlusPlus && E->isPRValue()) {
      // The temporary is an lvalue in C++98 and an xvalue otherwise.
      ExprResult Materialized = CreateMaterializeTemporaryExpr(
          E->getType(), E, !getLangOpts().CPlusPlus11);
      if (Materialized.isInvalid())
        return ExprError();
      E = Materialized.get();
    }
    // C17 6.7.1p6 footnote 124: The implementation can treat any register
    // declaration simply as an auto declaration. However, whether or not
    // addressable storage is actually used, the address of any part of an
    // object declared with storage-class specifier register cannot be
    // computed, either explicitly (by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly (by converting an array name to a pointer as
    // discussed in 6.3.2.1). Thus, the only operator that can be applied to an
    // array declared with storage-class specifier register is sizeof.
    if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
        if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
          if (VD->getStorageClass() == SC_Register) {
            Diag(E->getExprLoc(), diag::err_typecheck_address_of)
                << /*register variable*/ 3 << E->getSourceRange();
            return ExprError();
          }
        }
      }
    }
  }

  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
                                  CurFPFeatureOverrides());
}

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
  switch (ScalarTy->getScalarTypeKind()) {
  case Type::STK_Bool: return CK_NoOp;
  case Type::STK_CPointer: return CK_PointerToBoolean;
  case Type::STK_BlockPointer: return CK_PointerToBoolean;
  case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
  case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
  case Type::STK_Integral: return CK_IntegralToBoolean;
  case Type::STK_Floating: return CK_FloatingToBoolean;
  case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
  case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
  case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
  }
  llvm_unreachable("unknown scalar type kind");
}

/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
  if (D->getMostRecentDecl()->isUsed())
    return true;

  if (D->isExternallyVisible())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // If this is a function template and none of its specializations is used,
    // we should warn.
    if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const FunctionDecl *DeclToCheck;
    if (FD->hasBody(DeclToCheck))
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = FD->getMostRecentDecl();
    if (DeclToCheck != FD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // If a variable usable in constant expressions is referenced,
    // don't warn if it isn't used: if the value of a variable is required
    // for the computation of a constant expression, it doesn't make sense to
    // warn even if the variable isn't odr-used. (isReferenced doesn't
    // precisely reflect that, but it's a decent approximation.)
    if (VD->isReferenced() &&
        VD->mightBeUsableInConstantExpressions(SemaRef->Context))
      return true;

    if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
      // If this is a variable template and none of its specializations is
      // used, we should warn.
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const VarDecl *DeclToCheck = VD->getDefinition();
    if (DeclToCheck)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = VD->getMostRecentDecl();
    if (DeclToCheck != VD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  return false;
}

static bool isFunctionOrVarDeclExternC(NamedDecl *ND) {
  if (auto *FD = dyn_cast<FunctionDecl>(ND))
    return FD->isExternC();
  return cast<VarDecl>(ND)->isExternC();
}

/// Determine whether ND is an external-linkage function or variable whose
/// type has no linkage.
bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) {
  // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
  // because we also want to catch the case where its type has VisibleNoLinkage,
  // which does not affect the linkage of VD.
  return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
         !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
         !isFunctionOrVarDeclExternC(VD);
}

/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
void Sema::getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
  for (const auto &UndefinedUse : UndefinedButUsed) {
    NamedDecl *ND = UndefinedUse.first;

    // Ignore declarations that have become invalid.
    if (ND->isInvalidDecl()) continue;

    // __attribute__((weakref)) is basically a definition.
    if (ND->hasAttr<WeakRefAttr>()) continue;

    if (isa<CXXDeductionGuideDecl>(ND))
      continue;

    if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
      // An exported function will always be emitted when defined, so even if
      // the function is inline, it doesn't have to be emitted in this TU. An
      // imported function implies that it has been exported somewhere else.
      continue;
    }

    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
      if (FD->isDefined())
        continue;
      if (FD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(FD) &&
          !FD->getMostRecentDecl()->isInlined() &&
          !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;
      if (FD->getBuiltinID())
        continue;
    } else {
      auto *VD = cast<VarDecl>(ND);
      if (VD->hasDefinition() != VarDecl::DeclarationOnly)
        continue;
      if (VD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD) &&
          !VD->getMostRecentDecl()->isInline() &&
          !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;

      // Skip VarDecls that lack formal definitions but which we know are in
      // fact defined somewhere.
      if (VD->isKnownToBeDefined())
        continue;
    }

    Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
  }
}

/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  if (Undefined.empty()) return;

  for (auto Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
                                    ? diag::ext_undefined_internal_type
                                    : diag::err_undefined_internal_type)
          << isa<VarDecl>(VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      bool IsImplicitBase = false;
      if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
        auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
        if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
                          llvm::omp::TraitProperty::
                              implementation_extension_disable_implicit_base)) {
          const auto *Func = cast<FunctionDecl>(
              cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
          IsImplicitBase = BaseD->isImplicit() &&
                           Func->getIdentifier()->isMangledOpenMPVariantName();
        }
      }
      if (!S.getLangOpts().OpenMP || !IsImplicitBase)
        S.Diag(VD->getLocation(), diag::warn_undefined_internal)
            << isa<VarDecl>(VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
    } else {
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
    }
    if (UseLoc.isValid())
      S.Diag(UseLoc, diag::note_used_here);
  }

  S.UndefinedButUsed.clear();
}

void Sema::LoadExternalWeakUndeclaredIdentifiers() {
  if (!ExternalSource)
    return;

  SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
  ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
  for (auto &WeakID : WeakIDs)
    (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
}

typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;

/// Returns true, if all methods and nested classes of the given
/// CXXRecordDecl are defined in this translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
                                            RecordCompleteMap &MNCComplete) {
  RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
  if (Cache != MNCComplete.end())
    return Cache->second;
  if (!RD->isCompleteDefinition())
    return false;
  bool Complete = true;
  for (DeclContext::decl_iterator I = RD->decls_begin(),
                                  E = RD->decls_end();
       I != E && Complete; ++I) {
    if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
      Complete = M->isDefined() || M->isDefaulted() ||
                 (M->isPure() && !isa<CXXDestructorDecl>(M));
    else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
      // If the template function is marked as late template parsed at this
      // point, it has not been instantiated and therefore we have not
      // performed semantic analysis on it yet, so we cannot know if the type
      // can be considered complete.
      Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
                 F->getTemplatedDecl()->isDefined();
    else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
      if (R->isInjectedClassName())
        continue;
      if (R->hasDefinition())
        Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
                                                   MNCComplete);
      else
        Complete = false;
    }
  }
  MNCComplete[RD] = Complete;
  return Complete;
}

/// Returns true, if the given CXXRecordDecl is fully defined in this
/// translation unit, i.e. all methods are defined or pure virtual and all
/// friends, friend functions and nested classes are fully defined in this
/// translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
                                 RecordCompleteMap &RecordsComplete,
                                 RecordCompleteMap &MNCComplete) {
  RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
  if (Cache != RecordsComplete.end())
    return Cache->second;
  bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
  for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
                                      E = RD->friend_end();
       I != E && Complete; ++I) {
    // Check if friend classes and methods are complete.
    if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
      // Friend classes are available as the TypeSourceInfo of the FriendDecl.
      if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
        Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
      else
        Complete = false;
    } else {
      // Friend functions are available through the NamedDecl of FriendDecl.
      if (const FunctionDecl *FD =
              dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
        Complete = FD->isDefined();
      else
        // This is a template friend, give up.
        Complete = false;
    }
  }
  RecordsComplete[RD] = Complete;
  return Complete;
}

void Sema::emitAndClearUnusedLocalTypedefWarnings() {
  if (ExternalSource)
    ExternalSource->ReadUnusedLocalTypedefNameCandidates(
        UnusedLocalTypedefNameCandidates);
  for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
    if (TD->isReferenced())
      continue;
    Diag(TD->getLocation(), diag::warn_unused_local_typedef)
        << isa<TypeAliasDecl>(TD) << TD->getDeclName();
  }
  UnusedLocalTypedefNameCandidates.clear();
}

/// This is called before the very first declaration in the translation unit
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
  if (getLangOpts().CPlusPlusModules &&
      getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
    HandleStartOfHeaderUnit();
  else if (getLangOpts().ModulesTS &&
           (getLangOpts().getCompilingModule() ==
                LangOptions::CMK_ModuleInterface ||
            getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
    // We start in an implied global module fragment.
    SourceLocation StartOfTU =
        SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
    ActOnGlobalModuleFragmentDecl(StartOfTU);
    ModuleScopes.back().ImplicitGlobalModuleFragment = true;
  }
}

void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  // No explicit actions are required at the end of the global module fragment.
  if (Kind == TUFragmentKind::Global)
    return;

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(PendingInstantiations.end(),
                               LateParsedInstantiations.begin(),
                               LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefinedUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(PendingInstantiations.begin(),
                                 Pending.begin(), Pending.end());
  }

  {
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");

  // Report diagnostics for uncorrected delayed typos. Ideally all of them
  // should have been corrected by that time, but it is very hard to cover all
  // cases in practice.
  for (const auto &Typo : DelayedTypos) {
    // We pass an empty TypoCorrection to indicate no correction was performed.
    Typo.second.DiagHandler(TypoCorrection());
  }
  DelayedTypos.clear();
}

/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
  assert(DelayedDiagnostics.getCurrentPool() == nullptr
         && "reached end of translation unit with a pool attached?");

  // If code completion is enabled, don't perform any end-of-translation-unit
  // work.
  if (PP.isCodeCompletionEnabled())
    return;

  // Complete translation units and modules define vtables and perform implicit
  // instantiations. PCH files do not.
  if (TUKind != TU_Prefix) {
    DiagnoseUseOfUnimplementedSelectors();

    ActOnEndOfTranslationUnitFragment(
        !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
                                     Module::PrivateModuleFragment
            ? TUFragmentKind::Private
            : TUFragmentKind::Normal);

    if (LateTemplateParserCleanup)
      LateTemplateParserCleanup(OpaqueParser);

    CheckDelayedMemberExceptionSpecs();
  } else {
    // If we are building a TU prefix for serialization, it is safe to transfer
    // these over, even though they are not parsed. The end of the TU should be
    // outside of any eager template instantiation scope, so when this AST is
    // deserialized, these templates will not be parsed until the end of the
    // combined TU.
    PendingInstantiations.insert(PendingInstantiations.end(),
                                 LateParsedInstantiations.begin(),
                                 LateParsedInstantiations.end());
    LateParsedInstantiations.clear();

    if (LangOpts.PCHInstantiateTemplates) {
      llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
      PerformPendingInstantiations();
    }
  }

  DiagnoseUnterminatedPragmaAlignPack();
  DiagnoseUnterminatedPragmaAttribute();
  DiagnoseUnterminatedOpenMPDeclareTarget();

  // All delayed member exception specs should be checked or we end up accepting
  // incompatible declarations.
  assert(DelayedOverridingExceptionSpecChecks.empty());
  assert(DelayedEquivalentExceptionSpecChecks.empty());

  // All dllexport classes should have been processed already.
  assert(DelayedDllExportClasses.empty());
  assert(DelayedDllExportMemberFunctions.empty());

  // Remove file scoped decls that turned out to be used.
  UnusedFileScopedDecls.erase(
      std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
                     UnusedFileScopedDecls.end(),
                     [this](const DeclaratorDecl *DD) {
                       return ShouldRemoveFromUnused(this, DD);
                     }),
      UnusedFileScopedDecls.end());

  if (TUKind == TU_Prefix) {
    // Translation unit prefixes don't need any of the checking below.
    if (!PP.isIncrementalProcessingEnabled())
      TUScope = nullptr;
    return;
  }

  // Check for #pragma weak identifiers that were never declared
  LoadExternalWeakUndeclaredIdentifiers();
  for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
    if (WeakIDs.second.empty())
      continue;

    Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
                                      LookupOrdinaryName);
    if (PrevDecl != nullptr &&
        !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
      for (const auto &WI : WeakIDs.second)
        Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
            << "'weak'" << ExpectedVariableOrFunction;
    else
      for (const auto &WI : WeakIDs.second)
        Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
            << WeakIDs.first;
  }

  if (LangOpts.CPlusPlus11 &&
      !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
    CheckDelegatingCtorCycles();

  if (!Diags.hasErrorOccurred()) {
    if (ExternalSource)
      ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
    checkUndefinedButUsed(*this);
  }

  // A global-module-fragment is only permitted within a module unit.
  bool DiagnosedMissingModuleDeclaration = false;
  if (!ModuleScopes.empty() &&
      ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment &&
      !ModuleScopes.back().ImplicitGlobalModuleFragment) {
    Diag(ModuleScopes.back().BeginLoc,
         diag::err_module_declaration_missing_after_global_module_introducer);
    DiagnosedMissingModuleDeclaration = true;
  }

  if (TUKind == TU_Module) {
    // If we are building a module interface unit, we need to have seen the
    // module declaration by now.
    if (getLangOpts().getCompilingModule() ==
            LangOptions::CMK_ModuleInterface &&
        (ModuleScopes.empty() ||
         !ModuleScopes.back().Module->isModulePurview()) &&
        !DiagnosedMissingModuleDeclaration) {
      // FIXME: Make a better guess as to where to put the module declaration.
      Diag(getSourceManager().getLocForStartOfFile(
               getSourceManager().getMainFileID()),
           diag::err_module_declaration_missing);
    }

    // If we are building a module, resolve all of the exported declarations
    // now.
    if (Module *CurrentModule = PP.getCurrentModule()) {
      ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();

      SmallVector<Module *, 2> Stack;
      Stack.push_back(CurrentModule);
      while (!Stack.empty()) {
        Module *Mod = Stack.pop_back_val();

        // Resolve the exported declarations and conflicts.
        // FIXME: Actually complain, once we figure out how to teach the
        // diagnostic client to deal with complaints in the module map at this
        // point.
        ModMap.resolveExports(Mod, /*Complain=*/false);
        ModMap.resolveUses(Mod, /*Complain=*/false);
        ModMap.resolveConflicts(Mod, /*Complain=*/false);

        // Queue the submodules, so their exports will also be resolved.
        Stack.append(Mod->submodule_begin(), Mod->submodule_end());
      }
    }

    // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
    // modules when they are built, not every time they are used.
    emitAndClearUnusedLocalTypedefWarnings();
  }

  // C99 6.9.2p2:
  //   A declaration of an identifier for an object that has file
  //   scope without an initializer, and without a storage-class
  //   specifier or with the storage-class specifier static,
  //   constitutes a tentative definition. If a translation unit
  //   contains one or more tentative definitions for an identifier,
  //   and the translation unit contains no external definition for
  //   that identifier, then the behavior is exactly as if the
  //   translation unit contains a file scope declaration of that
  //   identifier, with the composite type as of the end of the
  //   translation unit, with an initializer equal to 0.
  llvm::SmallSet<VarDecl *, 32> Seen;
  for (TentativeDefinitionsType::iterator
           T = TentativeDefinitions.begin(ExternalSource),
           TEnd = TentativeDefinitions.end();
       T != TEnd; ++T) {
    VarDecl *VD = (*T)->getActingDefinition();

    // If the tentative definition was completed, getActingDefinition() returns
    // null. If we've already seen this variable before, insert()'s second
    // return value is false.
    if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
      continue;

    if (const IncompleteArrayType *ArrayT
          = Context.getAsIncompleteArrayType(VD->getType())) {
      // Set the length of the array to 1 (C99 6.9.2p5).
      Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
      llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
      QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One,
                                                nullptr, ArrayType::Normal, 0);
      VD->setType(T);
    } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
                                   diag::err_tentative_def_incomplete_type))
      VD->setInvalidDecl();

    // No initialization is performed for a tentative definition.
    CheckCompleteVariableDeclaration(VD);

    // Notify the consumer that we've completed a tentative definition.
    if (!VD->isInvalidDecl())
      Consumer.CompleteTentativeDefinition(VD);
  }

  for (auto D : ExternalDeclarations) {
    if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
      continue;

    Consumer.CompleteExternalDeclaration(D);
  }

  // If there were errors, disable 'unused' warnings since they will mostly be
  // noise. Don't warn for a use from a module: either we should warn on all
  // file-scope declarations in modules or not at all, but whether the
  // declaration is used is immaterial.
  if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
    // Output warning for unused file scoped decls.
    for (UnusedFileScopedDeclsType::iterator
             I = UnusedFileScopedDecls.begin(ExternalSource),
             E = UnusedFileScopedDecls.end(); I != E; ++I) {
      if (ShouldRemoveFromUnused(this, *I))
        continue;

      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
        const FunctionDecl *DiagD;
        if (!FD->hasBody(DiagD))
          DiagD = FD;
        if (DiagD->isDeleted())
          continue; // Deleted functions are supposed to be unused.
        if (DiagD->isReferenced()) {
          if (isa<CXXMethodDecl>(DiagD))
            Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
                << DiagD;
          else {
            if (FD->getStorageClass() == SC_Static &&
                !FD->isInlineSpecified() &&
                !SourceMgr.isInMainFile(
                    SourceMgr.getExpansionLoc(FD->getLocation())))
              Diag(DiagD->getLocation(),
                   diag::warn_unneeded_static_internal_decl)
                  << DiagD;
            else
              Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
                  << /*function*/ 0 << DiagD;
          }
        } else {
          if (FD->getDescribedFunctionTemplate())
            Diag(DiagD->getLocation(), diag::warn_unused_template)
                << /*function*/ 0 << DiagD;
          else
            Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
                                           ? diag::warn_unused_member_function
                                           : diag::warn_unused_function)
                << DiagD;
        }
      } else {
        const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
        if (!DiagD)
          DiagD = cast<VarDecl>(*I);
        if (DiagD->isReferenced()) {
          Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
              << /*variable*/ 1 << DiagD;
        } else if (DiagD->getType().isConstQualified()) {
          const SourceManager &SM = SourceMgr;
          if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
              !PP.getLangOpts().IsHeaderFile)
            Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
                << DiagD;
        } else {
          if (DiagD->getDescribedVarTemplate())
            Diag(DiagD->getLocation(), diag::warn_unused_template)
                << /*variable*/ 1 << DiagD;
          else
            Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD;
        }
      }
    }

    emitAndClearUnusedLocalTypedefWarnings();
  }

  if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
    // FIXME: Load additional unused private field candidates from the external
    // source.
    RecordCompleteMap RecordsComplete;
    RecordCompleteMap MNCComplete;
    for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
                                    E = UnusedPrivateFields.end();
         I != E; ++I) {
      const NamedDecl *D = *I;
      const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
      if (RD && !RD->isUnion() &&
          IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
        Diag(D->getLocation(), diag::warn_unused_private_field)
            << D->getDeclName();
      }
    }
  }

  if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
    if (ExternalSource)
      ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
    for (const auto &DeletedFieldInfo : DeleteExprs) {
      for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
        AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
                                  DeleteExprLoc.second);
      }
    }
  }

  // Check we've noticed that we're no longer parsing the initializer for every
  // variable. If we miss cases, then at best we have a performance issue and
  // at worst a rejects-valid bug.
  assert(ParsingInitForAutoVars.empty() &&
         "Didn't unmark var as having its initializer parsed");

  if (!PP.isIncrementalProcessingEnabled())
    TUScope = nullptr;
}

//===----------------------------------------------------------------------===//
// Helper functions.
//===----------------------------------------------------------------------===//

DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) {
  DeclContext *DC = CurContext;

  while (true) {
    if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
        isa<RequiresExprBodyDecl>(DC)) {
      DC = DC->getParent();
    } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
               cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
               cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
      DC = DC->getParent()->getParent();
    } else break;
  }

  return DC;
}

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) {
  DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
  return dyn_cast<FunctionDecl>(DC);
}

ObjCMethodDecl *Sema::getCurMethodDecl() {
  DeclContext *DC = getFunctionLevelDeclContext();
  while (isa<RecordDecl>(DC))
    DC = DC->getParent();
  return dyn_cast<ObjCMethodDecl>(DC);
}

NamedDecl *Sema::getCurFunctionOrMethodDecl() {
  DeclContext *DC = getFunctionLevelDeclContext();
  if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
    return cast<NamedDecl>(DC);
  return nullptr;
}

LangAS Sema::getDefaultCXXMethodAddrSpace() const {
  if (getLangOpts().OpenCL)
    return getASTContext().getDefaultOpenCLPointeeAddrSpace();
  return LangAS::Default;
}

void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
  // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
  // and yet we also use the current diag ID on the DiagnosticsEngine. This has
  // been made more painfully obvious by the refactor that introduced this
  // function, but it is possible that the incoming argument can be
  // eliminated. If it truly cannot be (for example, there is some reentrancy
  // issue I am not seeing yet), then there should at least be a clarifying
  // comment somewhere.
  if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) {
    switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
        Diags.getCurrentDiagID())) {
    case DiagnosticIDs::SFINAE_Report:
      // We'll report the diagnostic below.
      break;

    case DiagnosticIDs::SFINAE_SubstitutionFailure:
      // Count this failure so that we know that template argument deduction
      // has failed.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        Diagnostic DiagInfo(&Diags);
        (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
                       PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      Diags.setLastDiagnosticIgnored(true);
      Diags.Clear();
      return;

    case DiagnosticIDs::SFINAE_AccessControl: {
      // Per C++ Core Issue 1170, access control is part of SFINAE.
      // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
      // make access control a part of SFINAE for the purposes of checking
      // type traits.
      if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
        break;

      SourceLocation Loc = Diags.getCurrentDiagLoc();

      // Suppress this diagnostic.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        Diagnostic DiagInfo(&Diags);
        (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
            PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      Diags.setLastDiagnosticIgnored(true);
      Diags.Clear();

      // Now the diagnostic state is clear, produce a C++98 compatibility
      // warning.
      Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);

      // The last diagnostic which Sema produced was ignored. Suppress any
      // notes attached to it.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }

    case DiagnosticIDs::SFINAE_Suppress:
      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info) {
        Diagnostic DiagInfo(&Diags);
        (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
            PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      // Suppress this diagnostic.
      Diags.setLastDiagnosticIgnored(true);
      Diags.Clear();
      return;
    }
  }

  // Copy the diagnostic printing policy over the ASTContext printing policy.
  // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
  Context.setPrintingPolicy(getPrintingPolicy());

  // Emit the diagnostic.
  if (!Diags.EmitCurrentDiagnostic())
    return;

  // If this is not a note, and we're in a template instantiation
  // that is different from the last template instantiation where
  // we emitted an error, print a template instantiation
  // backtrace.
  if (!DiagnosticIDs::isBuiltinNote(DiagID))
    PrintContextStack();
}
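
// Rough illustration of the SFINAE handling above (example only): when
// substitution into a candidate such as
//
//   template <class T> auto size(const T &t) -> decltype(t.size());
//
// fails because T has no size() member, the resulting diagnostic is not
// emitted here; it is counted and/or stashed in the TemplateDeductionInfo so
// overload resolution can silently discard the candidate, and it only
// surfaces later if the failure needs to be reported.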

Sema::SemaDiagnosticBuilder
Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
  return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
}

bool Sema::hasUncompilableErrorOccurred() const {
  if (getDiagnostics().hasUncompilableErrorOccurred())
    return true;
  auto *FD = dyn_cast<FunctionDecl>(CurContext);
  if (!FD)
    return false;
  auto Loc = DeviceDeferredDiags.find(FD);
  if (Loc == DeviceDeferredDiags.end())
    return false;
  for (auto PDAt : Loc->second) {
    if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
      return true;
  }
  return false;
}

// Print notes showing how we can reach FD starting from an a priori
// known-callable function.
static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
  auto FnIt = S.DeviceKnownEmittedFns.find(FD);
  while (FnIt != S.DeviceKnownEmittedFns.end()) {
    // Respect error limit.
    if (S.Diags.hasFatalErrorOccurred())
      return;
    DiagnosticBuilder Builder(
        S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
    Builder << FnIt->second.FD;
    FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
  }
}

namespace {

/// Helper class that emits deferred diagnostic messages if an entity that
/// directly or indirectly uses the function causing the deferred diagnostics
/// is known to be emitted.
///
/// During parsing of the AST, certain diagnostic messages are recorded as
/// deferred diagnostics since it is unknown whether the functions containing
/// such diagnostics will be emitted. A list of potentially emitted functions
/// and of variables that may trigger emission of functions is also recorded.
/// DeferredDiagnosticsEmitter recursively visits the functions used by each
/// function to emit deferred diagnostics.
///
/// During the visit, certain OpenMP directives or initializers of variables
/// with certain OpenMP attributes cause the subsequent visiting of functions
/// to enter a state which is called OpenMP device context in this
/// implementation. The state is exited when the directive or initializer is
/// exited. This state can change the emission states of subsequent uses
/// of functions.
///
/// Conceptually the functions or variables to be visited form a use graph
/// where the parent node uses the child node. At any point of the visit,
/// the tree nodes traversed from the tree root to the current node form a use
/// stack. The emission state of the current node depends on two factors:
///    1. the emission state of the root node
///    2. whether the current node is in OpenMP device context
/// If the function is decided to be emitted, its contained deferred
/// diagnostics are emitted, together with the information about the use stack.
///
class DeferredDiagnosticsEmitter
    : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
  typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;

  // Whether the function is already in the current use-path.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;

  // The current use-path.
  llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;

  // Whether the visiting of the function has been done. Done[0] is for the
  // case not in OpenMP device context. Done[1] is for the case in OpenMP
  // device context. We need two sets because diagnostics emission may be
  // different depending on whether it is in OpenMP device context.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0; each
  // entering of device context increases it by 1 and each exit decreases
  // it by 1. A non-zero value indicates we are currently in device context.
  unsigned InOMPDeviceContext;

  DeferredDiagnosticsEmitter(Sema &S)
      : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}

  bool shouldVisitDiscardedStmt() const { return false; }

  void VisitOMPTargetDirective(OMPTargetDirective *Node) {
    ++InOMPDeviceContext;
    Inherited::VisitOMPTargetDirective(Node);
    --InOMPDeviceContext;
  }

  void visitUsedDecl(SourceLocation Loc, Decl *D) {
    if (isa<VarDecl>(D))
      return;
    if (auto *FD = dyn_cast<FunctionDecl>(D))
      checkFunc(Loc, FD);
    else
      Inherited::visitUsedDecl(Loc, D);
  }

  void checkVar(VarDecl *VD) {
    assert(VD->isFileVarDecl() &&
           "Should only check file-scope variables");
    if (auto *Init = VD->getInit()) {
      auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
      bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
                             *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
      if (IsDev)
        ++InOMPDeviceContext;
      this->Visit(Init);
      if (IsDev)
        --InOMPDeviceContext;
    }
  }

  void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
    auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
    FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
    if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
        S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
      return;
    // Finalize analysis of OpenMP-specific constructs.
    if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
        (ShouldEmitRootNode || InOMPDeviceContext))
      S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
    if (Caller)
      S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
    // Always emit deferred diagnostics for the direct users. This does not
    // lead to explosion of diagnostics since each user is visited at most
    // twice.
    if (ShouldEmitRootNode || InOMPDeviceContext)
      emitDeferredDiags(FD, Caller);
    // Do not revisit a function if the function body has been completely
    // visited before.
    if (!Done.insert(FD).second)
      return;
    InUsePath.insert(FD);
    UsePath.push_back(FD);
    if (auto *S = FD->getBody()) {
      this->Visit(S);
    }
    UsePath.pop_back();
    InUsePath.erase(FD);
  }

  void checkRecordedDecl(Decl *D) {
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
                           Sema::FunctionEmissionStatus::Emitted;
      checkFunc(SourceLocation(), FD);
    } else
      checkVar(cast<VarDecl>(D));
  }
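
  // Rough illustration of the device-context handling above (example only):
  // while visiting the initializer of
  //
  //   #pragma omp declare target
  //   int X = init();   // init() is visited with InOMPDeviceContext > 0
  //   #pragma omp end declare target
  //
  // functions reachable from init() are treated as if they were emitted for
  // the device, so diagnostics deferred on them may be flushed.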

  // Emit any deferred diagnostics for FD.
  void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
    auto It = S.DeviceDeferredDiags.find(FD);
    if (It == S.DeviceDeferredDiags.end())
      return;
    bool HasWarningOrError = false;
    bool FirstDiag = true;
    for (PartialDiagnosticAt &PDAt : It->second) {
      // Respect error limit.
      if (S.Diags.hasFatalErrorOccurred())
        return;
      const SourceLocation &Loc = PDAt.first;
      const PartialDiagnostic &PD = PDAt.second;
      HasWarningOrError |=
          S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
          DiagnosticsEngine::Warning;
      {
        DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
        PD.Emit(Builder);
      }
      // Emit the note on the first diagnostic so that it is not dropped if
      // later diagnostics run into the error limit.
      if (FirstDiag && HasWarningOrError && ShowCallStack) {
        emitCallStackNotes(S, FD);
        FirstDiag = false;
      }
    }
  }
};
} // namespace

void Sema::emitDeferredDiags() {
  if (ExternalSource)
    ExternalSource->ReadDeclsToCheckForDeferredDiags(
        DeclsToCheckForDeferredDiags);

  if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
      DeclsToCheckForDeferredDiags.empty())
    return;

  DeferredDiagnosticsEmitter DDE(*this);
  for (auto D : DeclsToCheckForDeferredDiags)
    DDE.checkRecordedDecl(D);
}

// In CUDA, there are some constructs which may appear in semantically-valid
// code, but trigger errors if we ever generate code for the function in which
// they appear. Essentially every construct you're not allowed to use on the
// device falls into this category, because you are allowed to use these
// constructs in a __host__ __device__ function, but only if that function is
// never codegen'ed on the device.
//
// To handle semantic checking for these constructs, we keep track of the set
// of functions we know will be emitted, either because we could tell a priori
// that they would be emitted, or because they were transitively called by a
// known-emitted function.
//
// We also keep a partial call graph of which not-known-emitted functions call
// which other not-known-emitted functions.
//
// When we see something which is illegal if the current function is emitted
// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
// CheckCUDACall), we first check if the current function is known-emitted. If
// so, we immediately output the diagnostic.
//
// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
// until we discover that the function is known-emitted, at which point we take
// it out of this map and emit the diagnostic.
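
// For example (illustrative only), in CUDA device compilation:
//
//   __host__ __device__ void f() { throw 0; }   // 'throw' not allowed on the
//                                                // device
//
// is acceptable as long as f() is only ever codegen'ed for the host; the error
// is deferred and only surfaces once f() becomes known-emitted on the device.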

Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
                                                   unsigned DiagID,
                                                   FunctionDecl *Fn, Sema &S)
    : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
      ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
  switch (K) {
  case K_Nop:
    break;
  case K_Immediate:
  case K_ImmediateWithCallStack:
    ImmediateDiag.emplace(
        ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
    break;
  case K_Deferred:
    assert(Fn && "Must have a function to attach the deferred diag to.");
    auto &Diags = S.DeviceDeferredDiags[Fn];
    PartialDiagId.emplace(Diags.size());
    Diags.emplace_back(Loc, S.PDiag(DiagID));
    break;
  }
}

Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
    : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
      ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
      PartialDiagId(D.PartialDiagId) {
  // Clean the previous diagnostics.
  D.ShowCallStack = false;
  D.ImmediateDiag.reset();
  D.PartialDiagId.reset();
}

Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
  if (ImmediateDiag) {
    // Emit our diagnostic and, if it was a warning or error, output a call
    // stack if Fn isn't a priori known-emitted.
    bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
                                DiagID, Loc) >= DiagnosticsEngine::Warning;
    ImmediateDiag.reset(); // Emit the immediate diag.
    if (IsWarningOrError && ShowCallStack)
      emitCallStackNotes(S, Fn);
  } else {
    assert((!PartialDiagId || ShowCallStack) &&
           "Must always show call stack for deferred diags.");
  }
}

Sema::SemaDiagnosticBuilder
Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
  FD = FD ? FD : getCurFunctionDecl();
  if (LangOpts.OpenMP)
    return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
                                   : diagIfOpenMPHostCode(Loc, DiagID, FD);
  if (getLangOpts().CUDA)
    return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
                                      : CUDADiagIfHostCode(Loc, DiagID);

  if (getLangOpts().SYCLIsDevice)
    return SYCLDiagIfDeviceCode(Loc, DiagID);

  return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
                               FD, *this);
}

Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
                                       bool DeferHint) {
  bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
  bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
                     DiagnosticIDs::isDeferrable(DiagID) &&
                     (DeferHint || DeferDiags || !IsError);
  auto SetIsLastErrorImmediate = [&](bool Flag) {
    if (IsError)
      IsLastErrorImmediate = Flag;
  };
  if (!ShouldDefer) {
    SetIsLastErrorImmediate(true);
    return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
                                 DiagID, getCurFunctionDecl(), *this);
  }

  SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
                                 ? CUDADiagIfDeviceCode(Loc, DiagID)
                                 : CUDADiagIfHostCode(Loc, DiagID);
  SetIsLastErrorImmediate(DB.isImmediate());
  return DB;
}

void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  if (isUnevaluatedContext() || Ty.isNull())
    return;

  // The original idea behind checkTypeSupport is that an unused declaration
  // can be replaced with an array of bytes of the same size during codegen.
  // Such a replacement is not possible for types without a constant byte
  // size, like zero-length arrays, so do a deep check for SYCL.
  if (D && LangOpts.SYCLIsDevice) {
    llvm::DenseSet<QualType> Visited;
    deepTypeCheckForSYCLDevice(Loc, Visited, D);
  }

  Decl *C = cast<Decl>(getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function,
  // or with the value declaration otherwise.
  FunctionDecl *FD = isa<FunctionDecl>(C) ? cast<FunctionDecl>(C)
                                          : dyn_cast_or_null<FunctionDecl>(D);

  auto CheckDeviceType = [&](QualType Ty) {
    if (Ty->isDependentType())
      return;

    if (Ty->isBitIntType()) {
      if (!Context.getTargetInfo().hasBitIntType()) {
        PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
        if (D)
          PD << D;
        else
          PD << "expression";
        targetDiag(Loc, PD, FD)
            << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
            << Ty << Context.getTargetInfo().getTriple().str();
      }
      return;
    }

    // Check if we are dealing with two 'long double' but with different
    // semantics.
    bool LongDoubleMismatched = false;
    if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
      const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
      if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasFloat128Type()) ||
          (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasIbm128Type()))
        LongDoubleMismatched = true;
    }

    if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
        (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
        (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
        (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
         !Context.getTargetInfo().hasInt128Type()) ||
        LongDoubleMismatched) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (targetDiag(Loc, PD, FD)
          << true /*show bit size*/
          << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
          << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }
  };

  auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
    if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice) ||
        LangOpts.CUDAIsDevice)
      CheckDeviceType(Ty);

    QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
    const TargetInfo &TI = Context.getTargetInfo();
    if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << false /*return*/
          << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }

    bool IsDouble = UnqualTy == Context.DoubleTy;
    bool IsFloat = UnqualTy == Context.FloatTy;
    if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << true /*return*/
          << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }
  };

  CheckType(Ty);
  if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
    for (const auto &ParamTy : FPTy->param_types())
      CheckType(ParamTy);
    CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
  }
  if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
    CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}
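
// For illustration (example only), these checks are what reject uses of types
// the current target cannot represent. On a target without native __int128 or
// _Float16 support, for instance:
//
//   __int128 Big;                 // err_target_unsupported_type
//   _Float16 Half(_Float16 X);    // parameter and return types are checked,
//                                 // as is an FP return type when the target
//                                 // has no FP-return support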

/// Looks through the macro-expansion chain for the given
/// location, looking for a macro expansion with the given name.
/// If one is found, returns true and sets the location to that
/// expansion loc.
bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
  SourceLocation loc = locref;
  if (!loc.isMacroID()) return false;

  // There's no good way right now to look at the intermediate
  // expansions, so just jump to the expansion location.
  loc = getSourceManager().getExpansionLoc(loc);

  // If that's written with the name, stop here.
  SmallString<16> buffer;
  if (getPreprocessor().getSpelling(loc, buffer) == name) {
    locref = loc;
    return true;
  }
  return false;
}

/// Determines the active Scope associated with the given declaration
/// context.
///
/// This routine maps a declaration context to the active Scope object that
/// represents that declaration context in the parser. It is typically used
/// from "scope-less" code (e.g., template instantiation, lazy creation of
/// declarations) that injects a name for name-lookup purposes and, therefore,
/// must update the Scope.
///
/// \returns The scope corresponding to the given declaration context, or NULL
/// if no such scope is open.
Scope *Sema::getScopeForContext(DeclContext *Ctx) {

  if (!Ctx)
    return nullptr;

  Ctx = Ctx->getPrimaryContext();
  for (Scope *S = getCurScope(); S; S = S->getParent()) {
    // Ignore scopes that cannot have declarations. This is important for
    // out-of-line definitions of static class members.
    if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
      if (DeclContext *Entity = S->getEntity())
        if (Ctx == Entity->getPrimaryContext())
          return S;
  }

  return nullptr;
}

/// Enter a new function scope
void Sema::PushFunctionScope() {
  if (FunctionScopes.empty() && CachedFunctionScope) {
    // Use CachedFunctionScope to avoid allocating memory when possible.
    CachedFunctionScope->Clear();
    FunctionScopes.push_back(CachedFunctionScope.release());
  } else {
    FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
  }
  if (LangOpts.OpenMP)
    pushOpenMPFunctionRegion();
}

void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
  FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
                                              BlockScope, Block));
}

LambdaScopeInfo *Sema::PushLambdaScope() {
  LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
  FunctionScopes.push_back(LSI);
  return LSI;
}

void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
  if (LambdaScopeInfo *const LSI = getCurLambda()) {
    LSI->AutoTemplateParameterDepth = Depth;
    return;
  }
  llvm_unreachable(
      "Remove assertion if intentionally called in a non-lambda context.");
}

// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
// This also performs initialization of block variables when they are moved
// to the heap.
// It uses the same rules as applicable for implicit moves according to the
// C++ standard in effect ([class.copy.elision]p3).
static void checkEscapingByref(VarDecl *VD, Sema &S) {
  QualType T = VD->getType();
  EnterExpressionEvaluationContext scope(
      S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
  SourceLocation Loc = VD->getLocation();
  Expr *VarRef =
      new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
  ExprResult Result;
  auto IE = InitializedEntity::InitializeBlock(Loc, T);
  if (S.getLangOpts().CPlusPlus2b) {
    auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
                                       VK_XValue, FPOptionsOverride());
    Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
  } else {
    Result = S.PerformMoveOrCopyInitialization(
        IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
        VarRef);
  }

  if (!Result.isInvalid()) {
    Result = S.MaybeCreateExprWithCleanups(Result);
    Expr *Init = Result.getAs<Expr>();
    S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
  }

  // The destructor's exception specification is needed when IRGen generates
  // block copy/destroy functions. Resolve it here.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DD = RD->getDestructor()) {
      auto *FPT = DD->getType()->getAs<FunctionProtoType>();
      S.ResolveExceptionSpec(Loc, FPT);
    }
}

static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
  // Set the EscapingByref flag of __block variables captured by
  // escaping blocks.
  for (const BlockDecl *BD : FSI.Blocks) {
    for (const BlockDecl::Capture &BC : BD->captures()) {
      VarDecl *VD = BC.getVariable();
      if (VD->hasAttr<BlocksAttr>()) {
        // Nothing to do if this is a __block variable captured by a
        // non-escaping block.
        if (BD->doesNotEscape())
          continue;
        VD->setEscapingByref();
      }
      // Check whether the captured variable is or contains an object of
      // non-trivial C union type.
      QualType CapType = BC.getVariable()->getType();
      if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
          CapType.hasNonTrivialToPrimitiveCopyCUnion())
        S.checkNonTrivialCUnion(BC.getVariable()->getType(),
                                BD->getCaretLocation(),
                                Sema::NTCUC_BlockCapture,
                                Sema::NTCUK_Destruct | Sema::NTCUK_Copy);
    }
  }

  for (VarDecl *VD : FSI.ByrefBlockVars) {
    // __block variables might require us to capture a copy-initializer.
    if (!VD->isEscapingByref())
      continue;
    // It's currently invalid to ever have a __block variable with an
    // array type; should we diagnose that here?
    // Regardless, we don't want to ignore array nesting when
    // constructing this copy.
    if (VD->getType()->isStructureOrClassType())
      checkEscapingByref(VD, S);
  }
}
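
// For example (illustrative only; the block and helpers below are
// hypothetical), given a __block variable captured by an escaping block:
//
//   __block SomeClass Obj;
//   DispatchAsync(^{ Use(Obj); });   // Obj is marked EscapingByref and a
//                                    // copy-initializer for its byref slot
//                                    // is built via checkEscapingByref()
//
// whereas a __block variable captured only by non-escaping blocks is left
// untouched.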

/// Pop a function (or block or lambda or captured region) scope from the
/// stack.
///
/// \param WP The warning policy to use for CFG-based warnings, or null if such
///        warnings should not be produced.
/// \param D The declaration corresponding to this function scope, if producing
///        CFG-based warnings.
/// \param BlockType The type of the block expression, if D is a BlockDecl.
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
                           const Decl *D, QualType BlockType) {
  assert(!FunctionScopes.empty() && "mismatched push/pop!");

  markEscapingByrefs(*FunctionScopes.back(), *this);

  PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
                               PoppedFunctionScopeDeleter(this));

  if (LangOpts.OpenMP)
    popOpenMPFunctionRegion(Scope.get());

  // Issue any analysis-based warnings.
  if (WP && D)
    AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
  else
    for (const auto &PUD : Scope->PossiblyUnreachableDiags)
      Diag(PUD.Loc, PUD.PD);

  return Scope;
}

void Sema::PoppedFunctionScopeDeleter::
operator()(sema::FunctionScopeInfo *Scope) const {
  // Stash the function scope for later reuse if it's for a normal function.
  if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
    Self->CachedFunctionScope.reset(Scope);
  else
    delete Scope;
}

void Sema::PushCompoundScope(bool IsStmtExpr) {
  getCurFunction()->CompoundScopes.push_back(
      CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
}

void Sema::PopCompoundScope() {
  FunctionScopeInfo *CurFunction = getCurFunction();
  assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");

  CurFunction->CompoundScopes.pop_back();
}

/// Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
  return getCurFunction()->hasUnrecoverableErrorOccurred();
}

void Sema::setFunctionHasBranchIntoScope() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasBranchIntoScope();
}

void Sema::setFunctionHasBranchProtectedScope() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasBranchProtectedScope();
}

void Sema::setFunctionHasIndirectGoto() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasIndirectGoto();
}

void Sema::setFunctionHasMustTail() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasMustTail();
}

BlockScopeInfo *Sema::getCurBlock() {
  if (FunctionScopes.empty())
    return nullptr;

  auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
  if (CurBSI && CurBSI->TheDecl &&
      !CurBSI->TheDecl->Encloses(CurContext)) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurBSI;
}

FunctionScopeInfo *Sema::getEnclosingFunction() const {
  if (FunctionScopes.empty())
    return nullptr;

  for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
    if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
      continue;
    return FunctionScopes[e];
  }
  return nullptr;
}

LambdaScopeInfo *Sema::getEnclosingLambda() const {
  for (auto *Scope : llvm::reverse(FunctionScopes)) {
    if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
      if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) {
        // We have switched contexts due to template instantiation.
        // FIXME: We should swap out the FunctionScopes during code synthesis
        // so that we don't need to check for this.
        assert(!CodeSynthesisContexts.empty());
        return nullptr;
      }
      return LSI;
    }
  }
  return nullptr;
}

LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
  if (FunctionScopes.empty())
    return nullptr;

  auto I = FunctionScopes.rbegin();
  if (IgnoreNonLambdaCapturingScope) {
    auto E = FunctionScopes.rend();
    while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
      ++I;
    if (I == E)
      return nullptr;
  }
  auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
  if (CurLSI && CurLSI->Lambda &&
      !CurLSI->Lambda->Encloses(CurContext)) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurLSI;
}

// We have a generic lambda if we parsed auto parameters, or we have
// an associated template parameter list.
LambdaScopeInfo *Sema::getCurGenericLambda() {
  if (LambdaScopeInfo *LSI = getCurLambda()) {
    return (LSI->TemplateParams.size() ||
            LSI->GLTemplateParameterList) ? LSI : nullptr;
  }
  return nullptr;
}


void Sema::ActOnComment(SourceRange Comment) {
  if (!LangOpts.RetainCommentsFromSystemHeaders &&
      SourceMgr.isInSystemHeader(Comment.getBegin()))
    return;
  RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
  if (RC.isAlmostTrailingComment()) {
    SourceRange MagicMarkerRange(Comment.getBegin(),
                                 Comment.getBegin().getLocWithOffset(3));
    StringRef MagicMarkerText;
    switch (RC.getKind()) {
    case RawComment::RCK_OrdinaryBCPL:
      MagicMarkerText = "///<";
      break;
    case RawComment::RCK_OrdinaryC:
      MagicMarkerText = "/**<";
      break;
    default:
      llvm_unreachable("if this is an almost Doxygen comment, "
                       "it should be ordinary");
    }
    Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment)
        << FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
  }
  Context.addComment(RC);
}
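
// For illustration (example only), the fix-it above rewrites an "almost
// trailing" Doxygen marker so the comment is recognized as a trailing member
// comment, roughly:
//
//   int Member; //< doc      ->      int Member; ///< doc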

// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
char ExternalSemaSource::ID;

void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }

void ExternalSemaSource::ReadKnownNamespaces(
    SmallVectorImpl<NamespaceDecl *> &Namespaces) {
}

void ExternalSemaSource::ReadUndefinedButUsed(
    llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}

void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
    FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}

/// Figure out if an expression could be turned into a call.
///
/// Use this when trying to recover from an error where the programmer may have
/// written just the name of a function instead of actually calling it.
///
/// \param E - The expression to examine.
/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
///   with no arguments, this parameter is set to the type returned by such a
///   call; otherwise, it is set to an empty QualType.
/// \param OverloadSet - If the expression is an overloaded function
///   name, this parameter is populated with the decls of the various
///   overloads.
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                         UnresolvedSetImpl &OverloadSet) {
  ZeroArgCallReturnTy = QualType();
  OverloadSet.clear();

  const OverloadExpr *Overloads = nullptr;
  bool IsMemExpr = false;
  if (E.getType() == Context.OverloadTy) {
    OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));

    // Ignore overloads that are pointer-to-member constants.
    if (FR.HasFormOfMemberPointer)
      return false;

    Overloads = FR.Expression;
  } else if (E.getType() == Context.BoundMemberTy) {
    Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
    IsMemExpr = true;
  }

  bool Ambiguous = false;
  bool IsMV = false;

  if (Overloads) {
    for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
                                      DeclsEnd = Overloads->decls_end();
         it != DeclsEnd; ++it) {
      OverloadSet.addDecl(*it);

      // Check whether the function is a non-template, non-member which takes
      // no arguments.
      if (IsMemExpr)
        continue;
      if (const FunctionDecl *OverloadDecl
              = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
        if (OverloadDecl->getMinRequiredArguments() == 0) {
          if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
              (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
                          OverloadDecl->isCPUSpecificMultiVersion()))) {
            ZeroArgCallReturnTy = QualType();
            Ambiguous = true;
          } else {
            ZeroArgCallReturnTy = OverloadDecl->getReturnType();
            IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
                   OverloadDecl->isCPUSpecificMultiVersion();
          }
        }
      }
    }

    // If it's not a member, use better machinery to try to resolve the call.
    if (!IsMemExpr)
      return !ZeroArgCallReturnTy.isNull();
  }

  // Attempt to call the member with no arguments - this will correctly handle
  // member templates with defaults/deduction of template arguments, overloads
  // with default arguments, etc.
  if (IsMemExpr && !E.isTypeDependent()) {
    Sema::TentativeAnalysisScope Trap(*this);
    ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
                                             None, SourceLocation());
    if (R.isUsable()) {
      ZeroArgCallReturnTy = R.get()->getType();
      return true;
    }
    return false;
  }

  if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
    if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
      if (Fun->getMinRequiredArguments() == 0)
        ZeroArgCallReturnTy = Fun->getReturnType();
      return true;
    }
  }

  // We don't have an expression that's convenient to get a FunctionDecl from,
  // but we can at least check if the type is "function of 0 arguments".
  QualType ExprTy = E.getType();
  const FunctionType *FunTy = nullptr;
  QualType PointeeTy = ExprTy->getPointeeType();
  if (!PointeeTy.isNull())
    FunTy = PointeeTy->getAs<FunctionType>();
  if (!FunTy)
    FunTy = ExprTy->getAs<FunctionType>();

  if (const FunctionProtoType *FPT =
          dyn_cast_or_null<FunctionProtoType>(FunTy)) {
    if (FPT->getNumParams() == 0)
      ZeroArgCallReturnTy = FunTy->getReturnType();
    return true;
  }
  return false;
}

/// Give notes for a set of overloads.
///
/// A companion to tryExprAsCall.
/// In cases when the name that the programmer wrote was an overloaded
/// function, we may be able to make some guesses about plausible overloads
/// based on their return types; such guesses can be handed off to this method
/// to be emitted as notes.
///
/// \param Overloads - The overloads to note.
/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
///   -fshow-overloads=best, this is the location to attach to the note about
///   too many candidates. Typically this will be the location of the original
///   ill-formed expression.
static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
                          const SourceLocation FinalNoteLoc) {
  unsigned ShownOverloads = 0;
  unsigned SuppressedOverloads = 0;
  for (UnresolvedSetImpl::iterator It = Overloads.begin(),
                                   DeclsEnd = Overloads.end();
       It != DeclsEnd; ++It) {
    if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
      ++SuppressedOverloads;
      continue;
    }

    NamedDecl *Fn = (*It)->getUnderlyingDecl();
    // Don't print overloads for non-default multiversioned functions.
    if (const auto *FD = Fn->getAsFunction()) {
      if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
          !FD->getAttr<TargetAttr>()->isDefaultVersion())
        continue;
    }
    S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
    ++ShownOverloads;
  }

  S.Diags.overloadCandidatesShown(ShownOverloads);

  if (SuppressedOverloads)
    S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
        << SuppressedOverloads;
}

static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
                                   const UnresolvedSetImpl &Overloads,
                                   bool (*IsPlausibleResult)(QualType)) {
  if (!IsPlausibleResult)
    return noteOverloads(S, Overloads, Loc);

  UnresolvedSet<2> PlausibleOverloads;
  for (OverloadExpr::decls_iterator It = Overloads.begin(),
                                    DeclsEnd = Overloads.end();
       It != DeclsEnd; ++It) {
    const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
    QualType OverloadResultTy = OverloadDecl->getReturnType();
    if (IsPlausibleResult(OverloadResultTy))
      PlausibleOverloads.addDecl(It.getDecl());
  }
  noteOverloads(S, PlausibleOverloads, Loc);
}

/// Determine whether the given expression can be called by just
/// putting parentheses after it. Notably, expressions with unary
/// operators can't be because the unary operator will start parsing
/// outside the call.
static bool IsCallableWithAppend(Expr *E) {
  E = E->IgnoreImplicit();
  return (!isa<CStyleCastExpr>(E) &&
          !isa<UnaryOperator>(E) &&
          !isa<BinaryOperator>(E) &&
          !isa<CXXOperatorCallExpr>(E));
}

static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    E = UO->getSubExpr();

  if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
    if (ULE->getNumDecls() == 0)
      return false;

    const NamedDecl *ND = *ULE->decls_begin();
    if (const auto *FD = dyn_cast<FunctionDecl>(ND))
      return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
  }
  return false;
}

bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                                bool ForceComplain,
                                bool (*IsPlausibleResult)(QualType)) {
  SourceLocation Loc = E.get()->getExprLoc();
  SourceRange Range = E.get()->getSourceRange();
  UnresolvedSet<4> Overloads;

  // If this is a SFINAE context, don't try anything that might trigger ADL
  // prematurely.
  if (!isSFINAEContext()) {
    QualType ZeroArgCallTy;
    if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
        !ZeroArgCallTy.isNull() &&
        (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
      // At this point, we know E is potentially callable with 0
      // arguments and that it returns something of a reasonable type,
      // so we can emit a fixit and carry on pretending that E was
      // actually a CallExpr.
      SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
      bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
      Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
                    << (IsCallableWithAppend(E.get())
                            ? FixItHint::CreateInsertion(ParenInsertionLoc,
                                                         "()")
                            : FixItHint());
      if (!IsMV)
        notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);

      // FIXME: Try this before emitting the fixit, and suppress diagnostics
      // while doing so.
      E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None,
                        Range.getEnd().getLocWithOffset(1));
      return true;
    }
  }
  if (!ForceComplain) return false;

  bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
  Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
  if (!IsMV)
    notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
  E = ExprError();
  return true;
}
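
// For example (illustrative only), given
//
//   int answer();
//   int X = answer;   // diagnosed; the user probably meant 'answer()'
//
// the recovery above emits the caller-supplied diagnostic with a fix-it that
// inserts "()", and then rebuilds the expression as a call so that the rest of
// semantic analysis proceeds as if 'answer()' had been written.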

IdentifierInfo *Sema::getSuperIdentifier() const {
  if (!Ident_super)
    Ident_super = &Context.Idents.get("super");
  return Ident_super;
}

IdentifierInfo *Sema::getFloat128Identifier() const {
  if (!Ident___float128)
    Ident___float128 = &Context.Idents.get("__float128");
  return Ident___float128;
}

void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
                                   CapturedRegionKind K,
                                   unsigned OpenMPCaptureLevel) {
  auto *CSI = new CapturedRegionScopeInfo(
      getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
      (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0,
      OpenMPCaptureLevel);
  CSI->ReturnType = Context.VoidTy;
  FunctionScopes.push_back(CSI);
}

CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
  if (FunctionScopes.empty())
    return nullptr;

  return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
}

const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
  return DeleteExprs;
}

Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
    : S(S), OldFPFeaturesState(S.CurFPFeatures),
      OldOverrides(S.FpPragmaStack.CurrentValue),
      OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
      OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}

Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
  S.CurFPFeatures = OldFPFeaturesState;
  S.FpPragmaStack.CurrentValue = OldOverrides;
  S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
}