1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the actions class which performs semantic analysis and 10 // builds an AST out of a parse stream. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "UsedDeclVisitor.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/ASTDiagnostic.h" 17 #include "clang/AST/Decl.h" 18 #include "clang/AST/DeclCXX.h" 19 #include "clang/AST/DeclFriend.h" 20 #include "clang/AST/DeclObjC.h" 21 #include "clang/AST/Expr.h" 22 #include "clang/AST/ExprCXX.h" 23 #include "clang/AST/PrettyDeclStackTrace.h" 24 #include "clang/AST/StmtCXX.h" 25 #include "clang/Basic/DarwinSDKInfo.h" 26 #include "clang/Basic/DiagnosticOptions.h" 27 #include "clang/Basic/PartialDiagnostic.h" 28 #include "clang/Basic/SourceManager.h" 29 #include "clang/Basic/Stack.h" 30 #include "clang/Basic/TargetInfo.h" 31 #include "clang/Lex/HeaderSearch.h" 32 #include "clang/Lex/HeaderSearchOptions.h" 33 #include "clang/Lex/Preprocessor.h" 34 #include "clang/Sema/CXXFieldCollector.h" 35 #include "clang/Sema/DelayedDiagnostic.h" 36 #include "clang/Sema/ExternalSemaSource.h" 37 #include "clang/Sema/Initialization.h" 38 #include "clang/Sema/MultiplexExternalSemaSource.h" 39 #include "clang/Sema/ObjCMethodList.h" 40 #include "clang/Sema/Scope.h" 41 #include "clang/Sema/ScopeInfo.h" 42 #include "clang/Sema/SemaConsumer.h" 43 #include "clang/Sema/SemaInternal.h" 44 #include "clang/Sema/TemplateDeduction.h" 45 #include "clang/Sema/TemplateInstCallback.h" 46 #include "clang/Sema/TypoCorrection.h" 47 #include "llvm/ADT/DenseMap.h" 48 #include "llvm/ADT/SmallPtrSet.h" 49 #include "llvm/Support/TimeProfiler.h" 50 51 using namespace clang; 52 using namespace sema; 53 54 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) { 55 return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts); 56 } 57 58 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); } 59 60 DarwinSDKInfo * 61 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, 62 StringRef Platform) { 63 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking(); 64 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) { 65 Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking) 66 << Platform; 67 WarnedDarwinSDKInfoMissing = true; 68 } 69 return SDKInfo; 70 } 71 72 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() { 73 if (CachedDarwinSDKInfo) 74 return CachedDarwinSDKInfo->get(); 75 auto SDKInfo = parseDarwinSDKInfo( 76 PP.getFileManager().getVirtualFileSystem(), 77 PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot); 78 if (SDKInfo && *SDKInfo) { 79 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo)); 80 return CachedDarwinSDKInfo->get(); 81 } 82 if (!SDKInfo) 83 llvm::consumeError(SDKInfo.takeError()); 84 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>(); 85 return nullptr; 86 } 87 88 IdentifierInfo * 89 Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, 90 unsigned int Index) { 91 std::string InventedName; 92 llvm::raw_string_ostream OS(InventedName); 93 94 if (!ParamName) 95 OS << "auto:" << Index 
+ 1; 96 else 97 OS << ParamName->getName() << ":auto"; 98 99 OS.flush(); 100 return &Context.Idents.get(OS.str()); 101 } 102 103 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context, 104 const Preprocessor &PP) { 105 PrintingPolicy Policy = Context.getPrintingPolicy(); 106 // In diagnostics, we print _Bool as bool if the latter is defined as the 107 // former. 108 Policy.Bool = Context.getLangOpts().Bool; 109 if (!Policy.Bool) { 110 if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) { 111 Policy.Bool = BoolMacro->isObjectLike() && 112 BoolMacro->getNumTokens() == 1 && 113 BoolMacro->getReplacementToken(0).is(tok::kw__Bool); 114 } 115 } 116 117 return Policy; 118 } 119 120 void Sema::ActOnTranslationUnitScope(Scope *S) { 121 TUScope = S; 122 PushDeclContext(S, Context.getTranslationUnitDecl()); 123 } 124 125 namespace clang { 126 namespace sema { 127 128 class SemaPPCallbacks : public PPCallbacks { 129 Sema *S = nullptr; 130 llvm::SmallVector<SourceLocation, 8> IncludeStack; 131 132 public: 133 void set(Sema &S) { this->S = &S; } 134 135 void reset() { S = nullptr; } 136 137 virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason, 138 SrcMgr::CharacteristicKind FileType, 139 FileID PrevFID) override { 140 if (!S) 141 return; 142 switch (Reason) { 143 case EnterFile: { 144 SourceManager &SM = S->getSourceManager(); 145 SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc)); 146 if (IncludeLoc.isValid()) { 147 if (llvm::timeTraceProfilerEnabled()) { 148 const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc)); 149 llvm::timeTraceProfilerBegin( 150 "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>")); 151 } 152 153 IncludeStack.push_back(IncludeLoc); 154 S->DiagnoseNonDefaultPragmaAlignPack( 155 Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude, 156 IncludeLoc); 157 } 158 break; 159 } 160 case ExitFile: 161 if (!IncludeStack.empty()) { 162 if (llvm::timeTraceProfilerEnabled()) 163 llvm::timeTraceProfilerEnd(); 164 165 S->DiagnoseNonDefaultPragmaAlignPack( 166 Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit, 167 IncludeStack.pop_back_val()); 168 } 169 break; 170 default: 171 break; 172 } 173 } 174 }; 175 176 } // end namespace sema 177 } // end namespace clang 178 179 const unsigned Sema::MaxAlignmentExponent; 180 const uint64_t Sema::MaximumAlignment; 181 182 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, 183 TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter) 184 : ExternalSource(nullptr), isMultiplexExternalSource(false), 185 CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp), 186 Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()), 187 SourceMgr(PP.getSourceManager()), CollectStats(false), 188 CodeCompleter(CodeCompleter), CurContext(nullptr), 189 OriginalLexicalContext(nullptr), MSStructPragmaOn(false), 190 MSPointerToMemberRepresentationMethod( 191 LangOpts.getMSPointerToMemberRepresentationMethod()), 192 VtorDispStack(LangOpts.getVtorDispMode()), 193 AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)), 194 DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr), 195 CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()), 196 CurInitSeg(nullptr), VisContext(nullptr), 197 PragmaAttributeCurrentTargetDecl(nullptr), 198 IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr), 199 LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp), 200 StdExperimentalNamespaceCache(nullptr), 
StdInitializerList(nullptr), 201 StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr), 202 MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr), 203 NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr), 204 ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr), 205 ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr), 206 DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false), 207 TUKind(TUKind), NumSFINAEErrors(0), 208 FullyCheckedComparisonCategories( 209 static_cast<unsigned>(ComparisonCategoryType::Last) + 1), 210 SatisfactionCache(Context), AccessCheckingSFINAE(false), 211 InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0), 212 ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr), 213 DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this), 214 ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr), 215 CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) { 216 assert(pp.TUKind == TUKind); 217 TUScope = nullptr; 218 isConstantEvaluatedOverride = false; 219 220 LoadedExternalKnownNamespaces = false; 221 for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I) 222 NSNumberLiteralMethods[I] = nullptr; 223 224 if (getLangOpts().ObjC) 225 NSAPIObj.reset(new NSAPI(Context)); 226 227 if (getLangOpts().CPlusPlus) 228 FieldCollector.reset(new CXXFieldCollector()); 229 230 // Tell diagnostics how to render things from the AST library. 231 Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context); 232 233 ExprEvalContexts.emplace_back( 234 ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{}, 235 nullptr, ExpressionEvaluationContextRecord::EK_Other); 236 237 // Initialization of data sharing attributes stack for OpenMP 238 InitDataSharingAttributesStack(); 239 240 std::unique_ptr<sema::SemaPPCallbacks> Callbacks = 241 std::make_unique<sema::SemaPPCallbacks>(); 242 SemaPPCallbackHandler = Callbacks.get(); 243 PP.addPPCallbacks(std::move(Callbacks)); 244 SemaPPCallbackHandler->set(*this); 245 } 246 247 // Anchor Sema's type info to this TU. 248 void Sema::anchor() {} 249 250 void Sema::addImplicitTypedef(StringRef Name, QualType T) { 251 DeclarationName DN = &Context.Idents.get(Name); 252 if (IdResolver.begin(DN) == IdResolver.end()) 253 PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope); 254 } 255 256 void Sema::Initialize() { 257 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 258 SC->InitializeSema(*this); 259 260 // Tell the external Sema source about this Sema object. 261 if (ExternalSemaSource *ExternalSema 262 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 263 ExternalSema->InitializeSema(*this); 264 265 // This needs to happen after ExternalSemaSource::InitializeSema(this) or we 266 // will not be able to merge any duplicate __va_list_tag decls correctly. 267 VAListTagName = PP.getIdentifierInfo("__va_list_tag"); 268 269 if (!TUScope) 270 return; 271 272 // Initialize predefined 128-bit integer types, if needed. 273 if (Context.getTargetInfo().hasInt128Type() || 274 (Context.getAuxTargetInfo() && 275 Context.getAuxTargetInfo()->hasInt128Type())) { 276 // If either of the 128-bit integer types are unavailable to name lookup, 277 // define them now. 
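    // An external AST source (e.g. a PCH or module) may already have
    // introduced these declarations; in that case IdResolver already has a
    // binding for the name and we must not redeclare it here.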
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }

  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
                        TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
        addImplicitTypedef("queue_t", Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      // OpenCL C v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // a 32-bit integer, and OpenCL C v2.0, s6.1.1 says int is always 32-bit
      // wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));

      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
357 // - The atomic_double type is only supported if double precision 358 // is supported and the cl_khr_int64_base_atomics and 359 // cl_khr_int64_extended_atomics extensions are supported. 360 // - If the device address space is 64-bits, the data types 361 // atomic_intptr_t, atomic_uintptr_t, atomic_size_t and 362 // atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and 363 // cl_khr_int64_extended_atomics extensions are supported. 364 365 auto AddPointerSizeDependentTypes = [&]() { 366 auto AtomicSizeT = Context.getAtomicType(Context.getSizeType()); 367 auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType()); 368 auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType()); 369 auto AtomicPtrDiffT = 370 Context.getAtomicType(Context.getPointerDiffType()); 371 addImplicitTypedef("atomic_size_t", AtomicSizeT); 372 addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT); 373 addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT); 374 addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT); 375 }; 376 377 if (Context.getTypeSize(Context.getSizeType()) == 32) { 378 AddPointerSizeDependentTypes(); 379 } 380 381 if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) { 382 auto AtomicHalfT = Context.getAtomicType(Context.HalfTy); 383 addImplicitTypedef("atomic_half", AtomicHalfT); 384 } 385 386 std::vector<QualType> Atomic64BitTypes; 387 if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics", 388 getLangOpts()) && 389 getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics", 390 getLangOpts())) { 391 if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) { 392 auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy); 393 addImplicitTypedef("atomic_double", AtomicDoubleT); 394 Atomic64BitTypes.push_back(AtomicDoubleT); 395 } 396 auto AtomicLongT = Context.getAtomicType(Context.LongTy); 397 auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy); 398 addImplicitTypedef("atomic_long", AtomicLongT); 399 addImplicitTypedef("atomic_ulong", AtomicULongT); 400 401 402 if (Context.getTypeSize(Context.getSizeType()) == 64) { 403 AddPointerSizeDependentTypes(); 404 } 405 } 406 } 407 408 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 409 if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \ 410 addImplicitTypedef(#ExtType, Context.Id##Ty); \ 411 } 412 #include "clang/Basic/OpenCLExtensionTypes.def" 413 } 414 415 if (Context.getTargetInfo().hasAArch64SVETypes()) { 416 #define SVE_TYPE(Name, Id, SingletonId) \ 417 addImplicitTypedef(Name, Context.SingletonId); 418 #include "clang/Basic/AArch64SVEACLETypes.def" 419 } 420 421 if (Context.getTargetInfo().getTriple().isPPC64()) { 422 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 423 addImplicitTypedef(#Name, Context.Id##Ty); 424 #include "clang/Basic/PPCTypes.def" 425 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 426 addImplicitTypedef(#Name, Context.Id##Ty); 427 #include "clang/Basic/PPCTypes.def" 428 } 429 430 if (Context.getTargetInfo().hasRISCVVTypes()) { 431 #define RVV_TYPE(Name, Id, SingletonId) \ 432 addImplicitTypedef(Name, Context.SingletonId); 433 #include "clang/Basic/RISCVVTypes.def" 434 } 435 436 if (Context.getTargetInfo().hasBuiltinMSVaList()) { 437 DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list"); 438 if (IdResolver.begin(MSVaList) == IdResolver.end()) 439 PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope); 440 } 441 442 DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list"); 443 if 
(IdResolver.begin(BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}

Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // If Sema's ExternalSource is the multiplexer - we own it.
  if (isMultiplexExternalSource)
    delete ExternalSource;

  // Delete cached satisfactions.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}

void Sema::warnStackExhausted(SourceLocation Loc) {
  // Only warn about this once.
  if (!WarnedStackExhausted) {
    Diag(Loc, diag::warn_stack_exhausted);
    WarnedStackExhausted = true;
  }
}

void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}

/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
                                         UnavailableAttr::ImplicitReason reason) {
  // If we're not in a function, it's an error.
  FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
  if (!fn) return false;

  // If we're in template instantiation, it's an error.
  if (inTemplateInstantiation())
    return false;

  // If that function's not in a system header, it's an error.
  if (!Context.getSourceManager().isInSystemHeader(loc))
    return false;

  // If the function is already unavailable, it's not an error.
  if (fn->hasAttr<UnavailableAttr>()) return true;

  fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
  return true;
}

ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
534 /// 535 void Sema::addExternalSource(ExternalSemaSource *E) { 536 assert(E && "Cannot use with NULL ptr"); 537 538 if (!ExternalSource) { 539 ExternalSource = E; 540 return; 541 } 542 543 if (isMultiplexExternalSource) 544 static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E); 545 else { 546 ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E); 547 isMultiplexExternalSource = true; 548 } 549 } 550 551 /// Print out statistics about the semantic analysis. 552 void Sema::PrintStats() const { 553 llvm::errs() << "\n*** Semantic Analysis Stats:\n"; 554 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n"; 555 556 BumpAlloc.PrintStats(); 557 AnalysisWarnings.PrintStats(); 558 } 559 560 void Sema::diagnoseNullableToNonnullConversion(QualType DstType, 561 QualType SrcType, 562 SourceLocation Loc) { 563 Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context); 564 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable && 565 *ExprNullability != NullabilityKind::NullableResult)) 566 return; 567 568 Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context); 569 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull) 570 return; 571 572 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType; 573 } 574 575 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) { 576 if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant, 577 E->getBeginLoc())) 578 return; 579 // nullptr only exists from C++11 on, so don't warn on its absence earlier. 580 if (!getLangOpts().CPlusPlus11) 581 return; 582 583 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer) 584 return; 585 if (E->IgnoreParenImpCasts()->getType()->isNullPtrType()) 586 return; 587 588 // Don't diagnose the conversion from a 0 literal to a null pointer argument 589 // in a synthesized call to operator<=>. 590 if (!CodeSynthesisContexts.empty() && 591 CodeSynthesisContexts.back().Kind == 592 CodeSynthesisContext::RewritingOperatorAsSpaceship) 593 return; 594 595 // If it is a macro from system header, and if the macro name is not "NULL", 596 // do not warn. 597 SourceLocation MaybeMacroLoc = E->getBeginLoc(); 598 if (Diags.getSuppressSystemWarnings() && 599 SourceMgr.isInSystemMacro(MaybeMacroLoc) && 600 !findMacroSpelling(MaybeMacroLoc, "NULL")) 601 return; 602 603 Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant) 604 << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr"); 605 } 606 607 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast. 608 /// If there is already an implicit cast, merge into the existing one. 609 /// The result is of the given category. 
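// For example, implicitly converting the 'int' prvalue '1' to 'long' yields an
// ImplicitCastExpr with CK_IntegralCast wrapping the IntegerLiteral; requesting
// the same conversion again simply retypes that existing ImplicitCastExpr
// rather than nesting a second one (see the merge of identical cast kinds near
// the end of this function).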
610 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty, 611 CastKind Kind, ExprValueKind VK, 612 const CXXCastPath *BasePath, 613 CheckedConversionKind CCK) { 614 #ifndef NDEBUG 615 if (VK == VK_PRValue && !E->isPRValue()) { 616 switch (Kind) { 617 default: 618 llvm_unreachable( 619 ("can't implicitly cast glvalue to prvalue with this cast " 620 "kind: " + 621 std::string(CastExpr::getCastKindName(Kind))) 622 .c_str()); 623 case CK_Dependent: 624 case CK_LValueToRValue: 625 case CK_ArrayToPointerDecay: 626 case CK_FunctionToPointerDecay: 627 case CK_ToVoid: 628 case CK_NonAtomicToAtomic: 629 break; 630 } 631 } 632 assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) && 633 "can't cast prvalue to glvalue"); 634 #endif 635 636 diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc()); 637 diagnoseZeroToNullptrConversion(Kind, E); 638 639 QualType ExprTy = Context.getCanonicalType(E->getType()); 640 QualType TypeTy = Context.getCanonicalType(Ty); 641 642 if (ExprTy == TypeTy) 643 return E; 644 645 if (Kind == CK_ArrayToPointerDecay) { 646 // C++1z [conv.array]: The temporary materialization conversion is applied. 647 // We also use this to fuel C++ DR1213, which applies to C++11 onwards. 648 if (getLangOpts().CPlusPlus && E->isPRValue()) { 649 // The temporary is an lvalue in C++98 and an xvalue otherwise. 650 ExprResult Materialized = CreateMaterializeTemporaryExpr( 651 E->getType(), E, !getLangOpts().CPlusPlus11); 652 if (Materialized.isInvalid()) 653 return ExprError(); 654 E = Materialized.get(); 655 } 656 // C17 6.7.1p6 footnote 124: The implementation can treat any register 657 // declaration simply as an auto declaration. However, whether or not 658 // addressable storage is actually used, the address of any part of an 659 // object declared with storage-class specifier register cannot be 660 // computed, either explicitly(by use of the unary & operator as discussed 661 // in 6.5.3.2) or implicitly(by converting an array name to a pointer as 662 // discussed in 6.3.2.1).Thus, the only operator that can be applied to an 663 // array declared with storage-class specifier register is sizeof. 664 if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) { 665 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { 666 if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) { 667 if (VD->getStorageClass() == SC_Register) { 668 Diag(E->getExprLoc(), diag::err_typecheck_address_of) 669 << /*register variable*/ 3 << E->getSourceRange(); 670 return ExprError(); 671 } 672 } 673 } 674 } 675 } 676 677 if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) { 678 if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) { 679 ImpCast->setType(Ty); 680 ImpCast->setValueKind(VK); 681 return E; 682 } 683 } 684 685 return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK, 686 CurFPFeatureOverrides()); 687 } 688 689 /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding 690 /// to the conversion from scalar type ScalarTy to the Boolean type. 
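// For example, 'if (ptr)' on an object pointer uses CK_PointerToBoolean, while
// 'if (n)' on an integer uses CK_IntegralToBoolean; a bool operand needs no
// conversion and maps to CK_NoOp.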
691 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) { 692 switch (ScalarTy->getScalarTypeKind()) { 693 case Type::STK_Bool: return CK_NoOp; 694 case Type::STK_CPointer: return CK_PointerToBoolean; 695 case Type::STK_BlockPointer: return CK_PointerToBoolean; 696 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean; 697 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean; 698 case Type::STK_Integral: return CK_IntegralToBoolean; 699 case Type::STK_Floating: return CK_FloatingToBoolean; 700 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean; 701 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean; 702 case Type::STK_FixedPoint: return CK_FixedPointToBoolean; 703 } 704 llvm_unreachable("unknown scalar type kind"); 705 } 706 707 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector. 708 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) { 709 if (D->getMostRecentDecl()->isUsed()) 710 return true; 711 712 if (D->isExternallyVisible()) 713 return true; 714 715 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 716 // If this is a function template and none of its specializations is used, 717 // we should warn. 718 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate()) 719 for (const auto *Spec : Template->specializations()) 720 if (ShouldRemoveFromUnused(SemaRef, Spec)) 721 return true; 722 723 // UnusedFileScopedDecls stores the first declaration. 724 // The declaration may have become definition so check again. 725 const FunctionDecl *DeclToCheck; 726 if (FD->hasBody(DeclToCheck)) 727 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 728 729 // Later redecls may add new information resulting in not having to warn, 730 // so check again. 731 DeclToCheck = FD->getMostRecentDecl(); 732 if (DeclToCheck != FD) 733 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 734 } 735 736 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 737 // If a variable usable in constant expressions is referenced, 738 // don't warn if it isn't used: if the value of a variable is required 739 // for the computation of a constant expression, it doesn't make sense to 740 // warn even if the variable isn't odr-used. (isReferenced doesn't 741 // precisely reflect that, but it's a decent approximation.) 742 if (VD->isReferenced() && 743 VD->mightBeUsableInConstantExpressions(SemaRef->Context)) 744 return true; 745 746 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate()) 747 // If this is a variable template and none of its specializations is used, 748 // we should warn. 749 for (const auto *Spec : Template->specializations()) 750 if (ShouldRemoveFromUnused(SemaRef, Spec)) 751 return true; 752 753 // UnusedFileScopedDecls stores the first declaration. 754 // The declaration may have become definition so check again. 755 const VarDecl *DeclToCheck = VD->getDefinition(); 756 if (DeclToCheck) 757 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 758 759 // Later redecls may add new information resulting in not having to warn, 760 // so check again. 
761 DeclToCheck = VD->getMostRecentDecl(); 762 if (DeclToCheck != VD) 763 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 764 } 765 766 return false; 767 } 768 769 static bool isFunctionOrVarDeclExternC(NamedDecl *ND) { 770 if (auto *FD = dyn_cast<FunctionDecl>(ND)) 771 return FD->isExternC(); 772 return cast<VarDecl>(ND)->isExternC(); 773 } 774 775 /// Determine whether ND is an external-linkage function or variable whose 776 /// type has no linkage. 777 bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) { 778 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage, 779 // because we also want to catch the case where its type has VisibleNoLinkage, 780 // which does not affect the linkage of VD. 781 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() && 782 !isExternalFormalLinkage(VD->getType()->getLinkage()) && 783 !isFunctionOrVarDeclExternC(VD); 784 } 785 786 /// Obtains a sorted list of functions and variables that are undefined but 787 /// ODR-used. 788 void Sema::getUndefinedButUsed( 789 SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) { 790 for (const auto &UndefinedUse : UndefinedButUsed) { 791 NamedDecl *ND = UndefinedUse.first; 792 793 // Ignore attributes that have become invalid. 794 if (ND->isInvalidDecl()) continue; 795 796 // __attribute__((weakref)) is basically a definition. 797 if (ND->hasAttr<WeakRefAttr>()) continue; 798 799 if (isa<CXXDeductionGuideDecl>(ND)) 800 continue; 801 802 if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) { 803 // An exported function will always be emitted when defined, so even if 804 // the function is inline, it doesn't have to be emitted in this TU. An 805 // imported function implies that it has been exported somewhere else. 806 continue; 807 } 808 809 if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { 810 if (FD->isDefined()) 811 continue; 812 if (FD->isExternallyVisible() && 813 !isExternalWithNoLinkageType(FD) && 814 !FD->getMostRecentDecl()->isInlined() && 815 !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 816 continue; 817 if (FD->getBuiltinID()) 818 continue; 819 } else { 820 auto *VD = cast<VarDecl>(ND); 821 if (VD->hasDefinition() != VarDecl::DeclarationOnly) 822 continue; 823 if (VD->isExternallyVisible() && 824 !isExternalWithNoLinkageType(VD) && 825 !VD->getMostRecentDecl()->isInline() && 826 !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 827 continue; 828 829 // Skip VarDecls that lack formal definitions but which we know are in 830 // fact defined somewhere. 831 if (VD->isKnownToBeDefined()) 832 continue; 833 } 834 835 Undefined.push_back(std::make_pair(ND, UndefinedUse.second)); 836 } 837 } 838 839 /// checkUndefinedButUsed - Check for undefined objects with internal linkage 840 /// or that are inline. 841 static void checkUndefinedButUsed(Sema &S) { 842 if (S.UndefinedButUsed.empty()) return; 843 844 // Collect all the still-undefined entities with internal linkage. 
845 SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined; 846 S.getUndefinedButUsed(Undefined); 847 if (Undefined.empty()) return; 848 849 for (auto Undef : Undefined) { 850 ValueDecl *VD = cast<ValueDecl>(Undef.first); 851 SourceLocation UseLoc = Undef.second; 852 853 if (S.isExternalWithNoLinkageType(VD)) { 854 // C++ [basic.link]p8: 855 // A type without linkage shall not be used as the type of a variable 856 // or function with external linkage unless 857 // -- the entity has C language linkage 858 // -- the entity is not odr-used or is defined in the same TU 859 // 860 // As an extension, accept this in cases where the type is externally 861 // visible, since the function or variable actually can be defined in 862 // another translation unit in that case. 863 S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage()) 864 ? diag::ext_undefined_internal_type 865 : diag::err_undefined_internal_type) 866 << isa<VarDecl>(VD) << VD; 867 } else if (!VD->isExternallyVisible()) { 868 // FIXME: We can promote this to an error. The function or variable can't 869 // be defined anywhere else, so the program must necessarily violate the 870 // one definition rule. 871 bool IsImplicitBase = false; 872 if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) { 873 auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>(); 874 if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive( 875 llvm::omp::TraitProperty:: 876 implementation_extension_disable_implicit_base)) { 877 const auto *Func = cast<FunctionDecl>( 878 cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl()); 879 IsImplicitBase = BaseD->isImplicit() && 880 Func->getIdentifier()->isMangledOpenMPVariantName(); 881 } 882 } 883 if (!S.getLangOpts().OpenMP || !IsImplicitBase) 884 S.Diag(VD->getLocation(), diag::warn_undefined_internal) 885 << isa<VarDecl>(VD) << VD; 886 } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) { 887 (void)FD; 888 assert(FD->getMostRecentDecl()->isInlined() && 889 "used object requires definition but isn't inline or internal?"); 890 // FIXME: This is ill-formed; we should reject. 891 S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD; 892 } else { 893 assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() && 894 "used var requires definition but isn't inline or internal?"); 895 S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD; 896 } 897 if (UseLoc.isValid()) 898 S.Diag(UseLoc, diag::note_used_here); 899 } 900 901 S.UndefinedButUsed.clear(); 902 } 903 904 void Sema::LoadExternalWeakUndeclaredIdentifiers() { 905 if (!ExternalSource) 906 return; 907 908 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs; 909 ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs); 910 for (auto &WeakID : WeakIDs) 911 WeakUndeclaredIdentifiers.insert(WeakID); 912 } 913 914 915 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap; 916 917 /// Returns true, if all methods and nested classes of the given 918 /// CXXRecordDecl are defined in this translation unit. 919 /// 920 /// Should only be called from ActOnEndOfTranslationUnit so that all 921 /// definitions are actually read. 
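// Results are memoized in MNCComplete so that a record (and the records it
// reaches through nested classes) is only walked once per translation unit,
// even when it is queried from several friend lists.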
922 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD, 923 RecordCompleteMap &MNCComplete) { 924 RecordCompleteMap::iterator Cache = MNCComplete.find(RD); 925 if (Cache != MNCComplete.end()) 926 return Cache->second; 927 if (!RD->isCompleteDefinition()) 928 return false; 929 bool Complete = true; 930 for (DeclContext::decl_iterator I = RD->decls_begin(), 931 E = RD->decls_end(); 932 I != E && Complete; ++I) { 933 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I)) 934 Complete = M->isDefined() || M->isDefaulted() || 935 (M->isPure() && !isa<CXXDestructorDecl>(M)); 936 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I)) 937 // If the template function is marked as late template parsed at this 938 // point, it has not been instantiated and therefore we have not 939 // performed semantic analysis on it yet, so we cannot know if the type 940 // can be considered complete. 941 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() && 942 F->getTemplatedDecl()->isDefined(); 943 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) { 944 if (R->isInjectedClassName()) 945 continue; 946 if (R->hasDefinition()) 947 Complete = MethodsAndNestedClassesComplete(R->getDefinition(), 948 MNCComplete); 949 else 950 Complete = false; 951 } 952 } 953 MNCComplete[RD] = Complete; 954 return Complete; 955 } 956 957 /// Returns true, if the given CXXRecordDecl is fully defined in this 958 /// translation unit, i.e. all methods are defined or pure virtual and all 959 /// friends, friend functions and nested classes are fully defined in this 960 /// translation unit. 961 /// 962 /// Should only be called from ActOnEndOfTranslationUnit so that all 963 /// definitions are actually read. 964 static bool IsRecordFullyDefined(const CXXRecordDecl *RD, 965 RecordCompleteMap &RecordsComplete, 966 RecordCompleteMap &MNCComplete) { 967 RecordCompleteMap::iterator Cache = RecordsComplete.find(RD); 968 if (Cache != RecordsComplete.end()) 969 return Cache->second; 970 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete); 971 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(), 972 E = RD->friend_end(); 973 I != E && Complete; ++I) { 974 // Check if friend classes and methods are complete. 975 if (TypeSourceInfo *TSI = (*I)->getFriendType()) { 976 // Friend classes are available as the TypeSourceInfo of the FriendDecl. 977 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl()) 978 Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete); 979 else 980 Complete = false; 981 } else { 982 // Friend functions are available through the NamedDecl of FriendDecl. 983 if (const FunctionDecl *FD = 984 dyn_cast<FunctionDecl>((*I)->getFriendDecl())) 985 Complete = FD->isDefined(); 986 else 987 // This is a template friend, give up. 988 Complete = false; 989 } 990 } 991 RecordsComplete[RD] = Complete; 992 return Complete; 993 } 994 995 void Sema::emitAndClearUnusedLocalTypedefWarnings() { 996 if (ExternalSource) 997 ExternalSource->ReadUnusedLocalTypedefNameCandidates( 998 UnusedLocalTypedefNameCandidates); 999 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) { 1000 if (TD->isReferenced()) 1001 continue; 1002 Diag(TD->getLocation(), diag::warn_unused_local_typedef) 1003 << isa<TypeAliasDecl>(TD) << TD->getDeclName(); 1004 } 1005 UnusedLocalTypedefNameCandidates.clear(); 1006 } 1007 1008 /// This is called before the very first declaration in the translation unit 1009 /// is parsed. 
Note that the ASTContext may have already injected some 1010 /// declarations. 1011 void Sema::ActOnStartOfTranslationUnit() { 1012 if (getLangOpts().ModulesTS && 1013 (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface || 1014 getLangOpts().getCompilingModule() == LangOptions::CMK_None)) { 1015 // We start in an implied global module fragment. 1016 SourceLocation StartOfTU = 1017 SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()); 1018 ActOnGlobalModuleFragmentDecl(StartOfTU); 1019 ModuleScopes.back().ImplicitGlobalModuleFragment = true; 1020 } 1021 } 1022 1023 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) { 1024 // No explicit actions are required at the end of the global module fragment. 1025 if (Kind == TUFragmentKind::Global) 1026 return; 1027 1028 // Transfer late parsed template instantiations over to the pending template 1029 // instantiation list. During normal compilation, the late template parser 1030 // will be installed and instantiating these templates will succeed. 1031 // 1032 // If we are building a TU prefix for serialization, it is also safe to 1033 // transfer these over, even though they are not parsed. The end of the TU 1034 // should be outside of any eager template instantiation scope, so when this 1035 // AST is deserialized, these templates will not be parsed until the end of 1036 // the combined TU. 1037 PendingInstantiations.insert(PendingInstantiations.end(), 1038 LateParsedInstantiations.begin(), 1039 LateParsedInstantiations.end()); 1040 LateParsedInstantiations.clear(); 1041 1042 // If DefinedUsedVTables ends up marking any virtual member functions it 1043 // might lead to more pending template instantiations, which we then need 1044 // to instantiate. 1045 DefineUsedVTables(); 1046 1047 // C++: Perform implicit template instantiations. 1048 // 1049 // FIXME: When we perform these implicit instantiations, we do not 1050 // carefully keep track of the point of instantiation (C++ [temp.point]). 1051 // This means that name lookup that occurs within the template 1052 // instantiation will always happen at the end of the translation unit, 1053 // so it will find some names that are not required to be found. This is 1054 // valid, but we could do better by diagnosing if an instantiation uses a 1055 // name that was not visible at its first point of instantiation. 1056 if (ExternalSource) { 1057 // Load pending instantiations from the external source. 1058 SmallVector<PendingImplicitInstantiation, 4> Pending; 1059 ExternalSource->ReadPendingInstantiations(Pending); 1060 for (auto PII : Pending) 1061 if (auto Func = dyn_cast<FunctionDecl>(PII.first)) 1062 Func->setInstantiationIsPending(true); 1063 PendingInstantiations.insert(PendingInstantiations.begin(), 1064 Pending.begin(), Pending.end()); 1065 } 1066 1067 { 1068 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1069 PerformPendingInstantiations(); 1070 } 1071 1072 emitDeferredDiags(); 1073 1074 assert(LateParsedInstantiations.empty() && 1075 "end of TU template instantiation should not create more " 1076 "late-parsed templates"); 1077 1078 // Report diagnostics for uncorrected delayed typos. Ideally all of them 1079 // should have been corrected by that time, but it is very hard to cover all 1080 // cases in practice. 1081 for (const auto &Typo : DelayedTypos) { 1082 // We pass an empty TypoCorrection to indicate no correction was performed. 
1083 Typo.second.DiagHandler(TypoCorrection()); 1084 } 1085 DelayedTypos.clear(); 1086 } 1087 1088 /// ActOnEndOfTranslationUnit - This is called at the very end of the 1089 /// translation unit when EOF is reached and all but the top-level scope is 1090 /// popped. 1091 void Sema::ActOnEndOfTranslationUnit() { 1092 assert(DelayedDiagnostics.getCurrentPool() == nullptr 1093 && "reached end of translation unit with a pool attached?"); 1094 1095 // If code completion is enabled, don't perform any end-of-translation-unit 1096 // work. 1097 if (PP.isCodeCompletionEnabled()) 1098 return; 1099 1100 // Complete translation units and modules define vtables and perform implicit 1101 // instantiations. PCH files do not. 1102 if (TUKind != TU_Prefix) { 1103 DiagnoseUseOfUnimplementedSelectors(); 1104 1105 ActOnEndOfTranslationUnitFragment( 1106 !ModuleScopes.empty() && ModuleScopes.back().Module->Kind == 1107 Module::PrivateModuleFragment 1108 ? TUFragmentKind::Private 1109 : TUFragmentKind::Normal); 1110 1111 if (LateTemplateParserCleanup) 1112 LateTemplateParserCleanup(OpaqueParser); 1113 1114 CheckDelayedMemberExceptionSpecs(); 1115 } else { 1116 // If we are building a TU prefix for serialization, it is safe to transfer 1117 // these over, even though they are not parsed. The end of the TU should be 1118 // outside of any eager template instantiation scope, so when this AST is 1119 // deserialized, these templates will not be parsed until the end of the 1120 // combined TU. 1121 PendingInstantiations.insert(PendingInstantiations.end(), 1122 LateParsedInstantiations.begin(), 1123 LateParsedInstantiations.end()); 1124 LateParsedInstantiations.clear(); 1125 1126 if (LangOpts.PCHInstantiateTemplates) { 1127 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1128 PerformPendingInstantiations(); 1129 } 1130 } 1131 1132 DiagnoseUnterminatedPragmaAlignPack(); 1133 DiagnoseUnterminatedPragmaAttribute(); 1134 1135 // All delayed member exception specs should be checked or we end up accepting 1136 // incompatible declarations. 1137 assert(DelayedOverridingExceptionSpecChecks.empty()); 1138 assert(DelayedEquivalentExceptionSpecChecks.empty()); 1139 1140 // All dllexport classes should have been processed already. 1141 assert(DelayedDllExportClasses.empty()); 1142 assert(DelayedDllExportMemberFunctions.empty()); 1143 1144 // Remove file scoped decls that turned out to be used. 1145 UnusedFileScopedDecls.erase( 1146 std::remove_if(UnusedFileScopedDecls.begin(nullptr, true), 1147 UnusedFileScopedDecls.end(), 1148 [this](const DeclaratorDecl *DD) { 1149 return ShouldRemoveFromUnused(this, DD); 1150 }), 1151 UnusedFileScopedDecls.end()); 1152 1153 if (TUKind == TU_Prefix) { 1154 // Translation unit prefixes don't need any of the checking below. 
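    // The prefix is serialized (e.g. into a PCH); the deferred checks run when
    // the translation unit that consumes it is compiled.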
1155 if (!PP.isIncrementalProcessingEnabled()) 1156 TUScope = nullptr; 1157 return; 1158 } 1159 1160 // Check for #pragma weak identifiers that were never declared 1161 LoadExternalWeakUndeclaredIdentifiers(); 1162 for (auto WeakID : WeakUndeclaredIdentifiers) { 1163 if (WeakID.second.getUsed()) 1164 continue; 1165 1166 Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(), 1167 LookupOrdinaryName); 1168 if (PrevDecl != nullptr && 1169 !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) 1170 Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type) 1171 << "'weak'" << ExpectedVariableOrFunction; 1172 else 1173 Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared) 1174 << WeakID.first; 1175 } 1176 1177 if (LangOpts.CPlusPlus11 && 1178 !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation())) 1179 CheckDelegatingCtorCycles(); 1180 1181 if (!Diags.hasErrorOccurred()) { 1182 if (ExternalSource) 1183 ExternalSource->ReadUndefinedButUsed(UndefinedButUsed); 1184 checkUndefinedButUsed(*this); 1185 } 1186 1187 // A global-module-fragment is only permitted within a module unit. 1188 bool DiagnosedMissingModuleDeclaration = false; 1189 if (!ModuleScopes.empty() && 1190 ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment && 1191 !ModuleScopes.back().ImplicitGlobalModuleFragment) { 1192 Diag(ModuleScopes.back().BeginLoc, 1193 diag::err_module_declaration_missing_after_global_module_introducer); 1194 DiagnosedMissingModuleDeclaration = true; 1195 } 1196 1197 if (TUKind == TU_Module) { 1198 // If we are building a module interface unit, we need to have seen the 1199 // module declaration by now. 1200 if (getLangOpts().getCompilingModule() == 1201 LangOptions::CMK_ModuleInterface && 1202 (ModuleScopes.empty() || 1203 !ModuleScopes.back().Module->isModulePurview()) && 1204 !DiagnosedMissingModuleDeclaration) { 1205 // FIXME: Make a better guess as to where to put the module declaration. 1206 Diag(getSourceManager().getLocForStartOfFile( 1207 getSourceManager().getMainFileID()), 1208 diag::err_module_declaration_missing); 1209 } 1210 1211 // If we are building a module, resolve all of the exported declarations 1212 // now. 1213 if (Module *CurrentModule = PP.getCurrentModule()) { 1214 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); 1215 1216 SmallVector<Module *, 2> Stack; 1217 Stack.push_back(CurrentModule); 1218 while (!Stack.empty()) { 1219 Module *Mod = Stack.pop_back_val(); 1220 1221 // Resolve the exported declarations and conflicts. 1222 // FIXME: Actually complain, once we figure out how to teach the 1223 // diagnostic client to deal with complaints in the module map at this 1224 // point. 1225 ModMap.resolveExports(Mod, /*Complain=*/false); 1226 ModMap.resolveUses(Mod, /*Complain=*/false); 1227 ModMap.resolveConflicts(Mod, /*Complain=*/false); 1228 1229 // Queue the submodules, so their exports will also be resolved. 1230 Stack.append(Mod->submodule_begin(), Mod->submodule_end()); 1231 } 1232 } 1233 1234 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for 1235 // modules when they are built, not every time they are used. 1236 emitAndClearUnusedLocalTypedefWarnings(); 1237 } 1238 1239 // C99 6.9.2p2: 1240 // A declaration of an identifier for an object that has file 1241 // scope without an initializer, and without a storage-class 1242 // specifier or with the storage-class specifier static, 1243 // constitutes a tentative definition. 
If a translation unit 1244 // contains one or more tentative definitions for an identifier, 1245 // and the translation unit contains no external definition for 1246 // that identifier, then the behavior is exactly as if the 1247 // translation unit contains a file scope declaration of that 1248 // identifier, with the composite type as of the end of the 1249 // translation unit, with an initializer equal to 0. 1250 llvm::SmallSet<VarDecl *, 32> Seen; 1251 for (TentativeDefinitionsType::iterator 1252 T = TentativeDefinitions.begin(ExternalSource), 1253 TEnd = TentativeDefinitions.end(); 1254 T != TEnd; ++T) { 1255 VarDecl *VD = (*T)->getActingDefinition(); 1256 1257 // If the tentative definition was completed, getActingDefinition() returns 1258 // null. If we've already seen this variable before, insert()'s second 1259 // return value is false. 1260 if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second) 1261 continue; 1262 1263 if (const IncompleteArrayType *ArrayT 1264 = Context.getAsIncompleteArrayType(VD->getType())) { 1265 // Set the length of the array to 1 (C99 6.9.2p5). 1266 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array); 1267 llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true); 1268 QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One, 1269 nullptr, ArrayType::Normal, 0); 1270 VD->setType(T); 1271 } else if (RequireCompleteType(VD->getLocation(), VD->getType(), 1272 diag::err_tentative_def_incomplete_type)) 1273 VD->setInvalidDecl(); 1274 1275 // No initialization is performed for a tentative definition. 1276 CheckCompleteVariableDeclaration(VD); 1277 1278 // Notify the consumer that we've completed a tentative definition. 1279 if (!VD->isInvalidDecl()) 1280 Consumer.CompleteTentativeDefinition(VD); 1281 } 1282 1283 for (auto D : ExternalDeclarations) { 1284 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed()) 1285 continue; 1286 1287 Consumer.CompleteExternalDeclaration(D); 1288 } 1289 1290 // If there were errors, disable 'unused' warnings since they will mostly be 1291 // noise. Don't warn for a use from a module: either we should warn on all 1292 // file-scope declarations in modules or not at all, but whether the 1293 // declaration is used is immaterial. 1294 if (!Diags.hasErrorOccurred() && TUKind != TU_Module) { 1295 // Output warning for unused file scoped decls. 1296 for (UnusedFileScopedDeclsType::iterator 1297 I = UnusedFileScopedDecls.begin(ExternalSource), 1298 E = UnusedFileScopedDecls.end(); I != E; ++I) { 1299 if (ShouldRemoveFromUnused(this, *I)) 1300 continue; 1301 1302 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { 1303 const FunctionDecl *DiagD; 1304 if (!FD->hasBody(DiagD)) 1305 DiagD = FD; 1306 if (DiagD->isDeleted()) 1307 continue; // Deleted functions are supposed to be unused. 
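        // A declaration that is referenced but still lands here was named
        // without being odr-used (for example, only in an unevaluated
        // operand), so the milder "unneeded" diagnostics below apply instead
        // of the "unused" ones.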
1308 if (DiagD->isReferenced()) { 1309 if (isa<CXXMethodDecl>(DiagD)) 1310 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function) 1311 << DiagD; 1312 else { 1313 if (FD->getStorageClass() == SC_Static && 1314 !FD->isInlineSpecified() && 1315 !SourceMgr.isInMainFile( 1316 SourceMgr.getExpansionLoc(FD->getLocation()))) 1317 Diag(DiagD->getLocation(), 1318 diag::warn_unneeded_static_internal_decl) 1319 << DiagD; 1320 else 1321 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1322 << /*function*/ 0 << DiagD; 1323 } 1324 } else { 1325 if (FD->getDescribedFunctionTemplate()) 1326 Diag(DiagD->getLocation(), diag::warn_unused_template) 1327 << /*function*/ 0 << DiagD; 1328 else 1329 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD) 1330 ? diag::warn_unused_member_function 1331 : diag::warn_unused_function) 1332 << DiagD; 1333 } 1334 } else { 1335 const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition(); 1336 if (!DiagD) 1337 DiagD = cast<VarDecl>(*I); 1338 if (DiagD->isReferenced()) { 1339 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1340 << /*variable*/ 1 << DiagD; 1341 } else if (DiagD->getType().isConstQualified()) { 1342 const SourceManager &SM = SourceMgr; 1343 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) || 1344 !PP.getLangOpts().IsHeaderFile) 1345 Diag(DiagD->getLocation(), diag::warn_unused_const_variable) 1346 << DiagD; 1347 } else { 1348 if (DiagD->getDescribedVarTemplate()) 1349 Diag(DiagD->getLocation(), diag::warn_unused_template) 1350 << /*variable*/ 1 << DiagD; 1351 else 1352 Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD; 1353 } 1354 } 1355 } 1356 1357 emitAndClearUnusedLocalTypedefWarnings(); 1358 } 1359 1360 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) { 1361 // FIXME: Load additional unused private field candidates from the external 1362 // source. 1363 RecordCompleteMap RecordsComplete; 1364 RecordCompleteMap MNCComplete; 1365 for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(), 1366 E = UnusedPrivateFields.end(); I != E; ++I) { 1367 const NamedDecl *D = *I; 1368 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); 1369 if (RD && !RD->isUnion() && 1370 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) { 1371 Diag(D->getLocation(), diag::warn_unused_private_field) 1372 << D->getDeclName(); 1373 } 1374 } 1375 } 1376 1377 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { 1378 if (ExternalSource) 1379 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); 1380 for (const auto &DeletedFieldInfo : DeleteExprs) { 1381 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { 1382 AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, 1383 DeleteExprLoc.second); 1384 } 1385 } 1386 } 1387 1388 // Check we've noticed that we're no longer parsing the initializer for every 1389 // variable. If we miss cases, then at best we have a performance issue and 1390 // at worst a rejects-valid bug. 1391 assert(ParsingInitForAutoVars.empty() && 1392 "Didn't unmark var as having its initializer parsed"); 1393 1394 if (!PP.isIncrementalProcessingEnabled()) 1395 TUScope = nullptr; 1396 } 1397 1398 1399 //===----------------------------------------------------------------------===// 1400 // Helper functions. 
1401 //===----------------------------------------------------------------------===// 1402 1403 DeclContext *Sema::getFunctionLevelDeclContext() { 1404 DeclContext *DC = CurContext; 1405 1406 while (true) { 1407 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) || 1408 isa<RequiresExprBodyDecl>(DC)) { 1409 DC = DC->getParent(); 1410 } else if (isa<CXXMethodDecl>(DC) && 1411 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call && 1412 cast<CXXRecordDecl>(DC->getParent())->isLambda()) { 1413 DC = DC->getParent()->getParent(); 1414 } 1415 else break; 1416 } 1417 1418 return DC; 1419 } 1420 1421 /// getCurFunctionDecl - If inside of a function body, this returns a pointer 1422 /// to the function decl for the function being parsed. If we're currently 1423 /// in a 'block', this returns the containing context. 1424 FunctionDecl *Sema::getCurFunctionDecl() { 1425 DeclContext *DC = getFunctionLevelDeclContext(); 1426 return dyn_cast<FunctionDecl>(DC); 1427 } 1428 1429 ObjCMethodDecl *Sema::getCurMethodDecl() { 1430 DeclContext *DC = getFunctionLevelDeclContext(); 1431 while (isa<RecordDecl>(DC)) 1432 DC = DC->getParent(); 1433 return dyn_cast<ObjCMethodDecl>(DC); 1434 } 1435 1436 NamedDecl *Sema::getCurFunctionOrMethodDecl() { 1437 DeclContext *DC = getFunctionLevelDeclContext(); 1438 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC)) 1439 return cast<NamedDecl>(DC); 1440 return nullptr; 1441 } 1442 1443 LangAS Sema::getDefaultCXXMethodAddrSpace() const { 1444 if (getLangOpts().OpenCL) 1445 return getASTContext().getDefaultOpenCLPointeeAddrSpace(); 1446 return LangAS::Default; 1447 } 1448 1449 void Sema::EmitCurrentDiagnostic(unsigned DiagID) { 1450 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here 1451 // and yet we also use the current diag ID on the DiagnosticsEngine. This has 1452 // been made more painfully obvious by the refactor that introduced this 1453 // function, but it is possible that the incoming argument can be 1454 // eliminated. If it truly cannot be (for example, there is some reentrancy 1455 // issue I am not seeing yet), then there should at least be a clarifying 1456 // comment somewhere. 1457 if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) { 1458 switch (DiagnosticIDs::getDiagnosticSFINAEResponse( 1459 Diags.getCurrentDiagID())) { 1460 case DiagnosticIDs::SFINAE_Report: 1461 // We'll report the diagnostic below. 1462 break; 1463 1464 case DiagnosticIDs::SFINAE_SubstitutionFailure: 1465 // Count this failure so that we know that template argument deduction 1466 // has failed. 1467 ++NumSFINAEErrors; 1468 1469 // Make a copy of this suppressed diagnostic and store it with the 1470 // template-deduction information. 1471 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1472 Diagnostic DiagInfo(&Diags); 1473 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1474 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1475 } 1476 1477 Diags.setLastDiagnosticIgnored(true); 1478 Diags.Clear(); 1479 return; 1480 1481 case DiagnosticIDs::SFINAE_AccessControl: { 1482 // Per C++ Core Issue 1170, access control is part of SFINAE. 1483 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily 1484 // make access control a part of SFINAE for the purposes of checking 1485 // type traits. 1486 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11) 1487 break; 1488 1489 SourceLocation Loc = Diags.getCurrentDiagLoc(); 1490 1491 // Suppress this diagnostic. 
1492 ++NumSFINAEErrors; 1493 1494 // Make a copy of this suppressed diagnostic and store it with the 1495 // template-deduction information. 1496 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1497 Diagnostic DiagInfo(&Diags); 1498 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1499 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1500 } 1501 1502 Diags.setLastDiagnosticIgnored(true); 1503 Diags.Clear(); 1504 1505 // Now the diagnostic state is clear, produce a C++98 compatibility 1506 // warning. 1507 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control); 1508 1509 // The last diagnostic which Sema produced was ignored. Suppress any 1510 // notes attached to it. 1511 Diags.setLastDiagnosticIgnored(true); 1512 return; 1513 } 1514 1515 case DiagnosticIDs::SFINAE_Suppress: 1516 // Make a copy of this suppressed diagnostic and store it with the 1517 // template-deduction information; 1518 if (*Info) { 1519 Diagnostic DiagInfo(&Diags); 1520 (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(), 1521 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1522 } 1523 1524 // Suppress this diagnostic. 1525 Diags.setLastDiagnosticIgnored(true); 1526 Diags.Clear(); 1527 return; 1528 } 1529 } 1530 1531 // Copy the diagnostic printing policy over the ASTContext printing policy. 1532 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292 1533 Context.setPrintingPolicy(getPrintingPolicy()); 1534 1535 // Emit the diagnostic. 1536 if (!Diags.EmitCurrentDiagnostic()) 1537 return; 1538 1539 // If this is not a note, and we're in a template instantiation 1540 // that is different from the last template instantiation where 1541 // we emitted an error, print a template instantiation 1542 // backtrace. 1543 if (!DiagnosticIDs::isBuiltinNote(DiagID)) 1544 PrintContextStack(); 1545 } 1546 1547 Sema::SemaDiagnosticBuilder 1548 Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) { 1549 return Diag(Loc, PD.getDiagID(), DeferHint) << PD; 1550 } 1551 1552 bool Sema::hasUncompilableErrorOccurred() const { 1553 if (getDiagnostics().hasUncompilableErrorOccurred()) 1554 return true; 1555 auto *FD = dyn_cast<FunctionDecl>(CurContext); 1556 if (!FD) 1557 return false; 1558 auto Loc = DeviceDeferredDiags.find(FD); 1559 if (Loc == DeviceDeferredDiags.end()) 1560 return false; 1561 for (auto PDAt : Loc->second) { 1562 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID())) 1563 return true; 1564 } 1565 return false; 1566 } 1567 1568 // Print notes showing how we can reach FD starting from an a priori 1569 // known-callable function. 1570 static void emitCallStackNotes(Sema &S, FunctionDecl *FD) { 1571 auto FnIt = S.DeviceKnownEmittedFns.find(FD); 1572 while (FnIt != S.DeviceKnownEmittedFns.end()) { 1573 // Respect error limit. 1574 if (S.Diags.hasFatalErrorOccurred()) 1575 return; 1576 DiagnosticBuilder Builder( 1577 S.Diags.Report(FnIt->second.Loc, diag::note_called_by)); 1578 Builder << FnIt->second.FD; 1579 FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD); 1580 } 1581 } 1582 1583 namespace { 1584 1585 /// Helper class that emits deferred diagnostic messages if an entity directly 1586 /// or indirectly using the function that causes the deferred diagnostic 1587 /// messages is known to be emitted. 1588 /// 1589 /// During parsing of AST, certain diagnostic messages are recorded as deferred 1590 /// diagnostics since it is unknown whether the functions containing such 1591 /// diagnostics will be emitted. 
A list of potentially emitted functions and
1592 /// variables that may trigger emission of other functions is also
1593 /// recorded. DeferredDiagnosticsEmitter recursively visits the functions used
1594 /// by each function in order to emit the deferred diagnostics.
1595 ///
1596 /// During the visit, certain OpenMP directives or the initializers of variables
1597 /// with certain OpenMP attributes will cause the subsequent visiting of any
1598 /// functions to enter a state which is called the OpenMP device context in this
1599 /// implementation. The state is exited when the directive or initializer is
1600 /// exited. This state can change the emission states of subsequent uses
1601 /// of functions.
1602 ///
1603 /// Conceptually, the functions or variables to be visited form a use graph
1604 /// where the parent node uses the child node. At any point of the visit,
1605 /// the tree nodes traversed from the tree root to the current node form a use
1606 /// stack. The emission state of the current node depends on two factors:
1607 /// 1. the emission state of the root node
1608 /// 2. whether the current node is in an OpenMP device context
1609 /// If the function is determined to be emitted, its contained deferred diagnostics
1610 /// are emitted, together with the information about the use stack.
1611 ///
1612 class DeferredDiagnosticsEmitter
1613     : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1614 public:
1615   typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1616
1617   // Whether the function is already in the current use-path.
1618   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1619
1620   // The current use-path.
1621   llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1622
1623   // Whether the visiting of the function has been done. Done[0] is for the
1624   // case not in OpenMP device context. Done[1] is for the case in OpenMP
1625   // device context. We need two sets because diagnostic emission may
1626   // differ depending on whether it is in OpenMP device context.
1627   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1628
1629   // Emission state of the root node of the current use graph.
1630   bool ShouldEmitRootNode;
1631
1632   // Current OpenMP device context level. It is initialized to 0; each
1633   // entry into a device context increases it by 1 and each exit decreases
1634   // it by 1. A non-zero value indicates we are currently in a device context.
1635 unsigned InOMPDeviceContext; 1636 1637 DeferredDiagnosticsEmitter(Sema &S) 1638 : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {} 1639 1640 bool shouldVisitDiscardedStmt() const { return false; } 1641 1642 void VisitOMPTargetDirective(OMPTargetDirective *Node) { 1643 ++InOMPDeviceContext; 1644 Inherited::VisitOMPTargetDirective(Node); 1645 --InOMPDeviceContext; 1646 } 1647 1648 void visitUsedDecl(SourceLocation Loc, Decl *D) { 1649 if (isa<VarDecl>(D)) 1650 return; 1651 if (auto *FD = dyn_cast<FunctionDecl>(D)) 1652 checkFunc(Loc, FD); 1653 else 1654 Inherited::visitUsedDecl(Loc, D); 1655 } 1656 1657 void checkVar(VarDecl *VD) { 1658 assert(VD->isFileVarDecl() && 1659 "Should only check file-scope variables"); 1660 if (auto *Init = VD->getInit()) { 1661 auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD); 1662 bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost || 1663 *DevTy == OMPDeclareTargetDeclAttr::DT_Any); 1664 if (IsDev) 1665 ++InOMPDeviceContext; 1666 this->Visit(Init); 1667 if (IsDev) 1668 --InOMPDeviceContext; 1669 } 1670 } 1671 1672 void checkFunc(SourceLocation Loc, FunctionDecl *FD) { 1673 auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0]; 1674 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back(); 1675 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) || 1676 S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD)) 1677 return; 1678 // Finalize analysis of OpenMP-specific constructs. 1679 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 && 1680 (ShouldEmitRootNode || InOMPDeviceContext)) 1681 S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc); 1682 if (Caller) 1683 S.DeviceKnownEmittedFns[FD] = {Caller, Loc}; 1684 // Always emit deferred diagnostics for the direct users. This does not 1685 // lead to explosion of diagnostics since each user is visited at most 1686 // twice. 1687 if (ShouldEmitRootNode || InOMPDeviceContext) 1688 emitDeferredDiags(FD, Caller); 1689 // Do not revisit a function if the function body has been completely 1690 // visited before. 1691 if (!Done.insert(FD).second) 1692 return; 1693 InUsePath.insert(FD); 1694 UsePath.push_back(FD); 1695 if (auto *S = FD->getBody()) { 1696 this->Visit(S); 1697 } 1698 UsePath.pop_back(); 1699 InUsePath.erase(FD); 1700 } 1701 1702 void checkRecordedDecl(Decl *D) { 1703 if (auto *FD = dyn_cast<FunctionDecl>(D)) { 1704 ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) == 1705 Sema::FunctionEmissionStatus::Emitted; 1706 checkFunc(SourceLocation(), FD); 1707 } else 1708 checkVar(cast<VarDecl>(D)); 1709 } 1710 1711 // Emit any deferred diagnostics for FD 1712 void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) { 1713 auto It = S.DeviceDeferredDiags.find(FD); 1714 if (It == S.DeviceDeferredDiags.end()) 1715 return; 1716 bool HasWarningOrError = false; 1717 bool FirstDiag = true; 1718 for (PartialDiagnosticAt &PDAt : It->second) { 1719 // Respect error limit. 1720 if (S.Diags.hasFatalErrorOccurred()) 1721 return; 1722 const SourceLocation &Loc = PDAt.first; 1723 const PartialDiagnostic &PD = PDAt.second; 1724 HasWarningOrError |= 1725 S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >= 1726 DiagnosticsEngine::Warning; 1727 { 1728 DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID())); 1729 PD.Emit(Builder); 1730 } 1731 // Emit the note on the first diagnostic in case too many diagnostics 1732 // cause the note not emitted. 
1733 if (FirstDiag && HasWarningOrError && ShowCallStack) { 1734 emitCallStackNotes(S, FD); 1735 FirstDiag = false; 1736 } 1737 } 1738 } 1739 }; 1740 } // namespace 1741 1742 void Sema::emitDeferredDiags() { 1743 if (ExternalSource) 1744 ExternalSource->ReadDeclsToCheckForDeferredDiags( 1745 DeclsToCheckForDeferredDiags); 1746 1747 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) || 1748 DeclsToCheckForDeferredDiags.empty()) 1749 return; 1750 1751 DeferredDiagnosticsEmitter DDE(*this); 1752 for (auto D : DeclsToCheckForDeferredDiags) 1753 DDE.checkRecordedDecl(D); 1754 } 1755 1756 // In CUDA, there are some constructs which may appear in semantically-valid 1757 // code, but trigger errors if we ever generate code for the function in which 1758 // they appear. Essentially every construct you're not allowed to use on the 1759 // device falls into this category, because you are allowed to use these 1760 // constructs in a __host__ __device__ function, but only if that function is 1761 // never codegen'ed on the device. 1762 // 1763 // To handle semantic checking for these constructs, we keep track of the set of 1764 // functions we know will be emitted, either because we could tell a priori that 1765 // they would be emitted, or because they were transitively called by a 1766 // known-emitted function. 1767 // 1768 // We also keep a partial call graph of which not-known-emitted functions call 1769 // which other not-known-emitted functions. 1770 // 1771 // When we see something which is illegal if the current function is emitted 1772 // (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or 1773 // CheckCUDACall), we first check if the current function is known-emitted. If 1774 // so, we immediately output the diagnostic. 1775 // 1776 // Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags 1777 // until we discover that the function is known-emitted, at which point we take 1778 // it out of this map and emit the diagnostic. 1779 1780 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc, 1781 unsigned DiagID, 1782 FunctionDecl *Fn, Sema &S) 1783 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn), 1784 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) { 1785 switch (K) { 1786 case K_Nop: 1787 break; 1788 case K_Immediate: 1789 case K_ImmediateWithCallStack: 1790 ImmediateDiag.emplace( 1791 ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID)); 1792 break; 1793 case K_Deferred: 1794 assert(Fn && "Must have a function to attach the deferred diag to."); 1795 auto &Diags = S.DeviceDeferredDiags[Fn]; 1796 PartialDiagId.emplace(Diags.size()); 1797 Diags.emplace_back(Loc, S.PDiag(DiagID)); 1798 break; 1799 } 1800 } 1801 1802 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D) 1803 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn), 1804 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag), 1805 PartialDiagId(D.PartialDiagId) { 1806 // Clean the previous diagnostics. 1807 D.ShowCallStack = false; 1808 D.ImmediateDiag.reset(); 1809 D.PartialDiagId.reset(); 1810 } 1811 1812 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() { 1813 if (ImmediateDiag) { 1814 // Emit our diagnostic and, if it was a warning or error, output a callstack 1815 // if Fn isn't a priori known-emitted. 1816 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel( 1817 DiagID, Loc) >= DiagnosticsEngine::Warning; 1818 ImmediateDiag.reset(); // Emit the immediate diag. 
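    // Illustrative note (an assumption for exposition, not taken from this
    // file): for a CUDA diagnostic attached to a __host__ __device__ function
    // hd() that is only known to be emitted because a __global__ kernel k()
    // calls it, the warning or error for hd() is followed here by
    // note_called_by notes that walk DeviceKnownEmittedFns back along the
    // recorded path k() -> hd().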
1819     if (IsWarningOrError && ShowCallStack)
1820       emitCallStackNotes(S, Fn);
1821   } else {
1822     assert((!PartialDiagId || ShowCallStack) &&
1823            "Must always show call stack for deferred diags.");
1824   }
1825 }
1826
1827 Sema::SemaDiagnosticBuilder
1828 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
1829   FD = FD ? FD : getCurFunctionDecl();
1830   if (LangOpts.OpenMP)
1831     return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1832                                    : diagIfOpenMPHostCode(Loc, DiagID, FD);
1833   if (getLangOpts().CUDA)
1834     return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
1835                                       : CUDADiagIfHostCode(Loc, DiagID);
1836
1837   if (getLangOpts().SYCLIsDevice)
1838     return SYCLDiagIfDeviceCode(Loc, DiagID);
1839
1840   return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1841                                FD, *this);
1842 }
1843
1844 Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
1845                                        bool DeferHint) {
1846   bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
1847   bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
1848                      DiagnosticIDs::isDeferrable(DiagID) &&
1849                      (DeferHint || DeferDiags || !IsError);
1850   auto SetIsLastErrorImmediate = [&](bool Flag) {
1851     if (IsError)
1852       IsLastErrorImmediate = Flag;
1853   };
1854   if (!ShouldDefer) {
1855     SetIsLastErrorImmediate(true);
1856     return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
1857                                  DiagID, getCurFunctionDecl(), *this);
1858   }
1859
1860   SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
1861                                  ? CUDADiagIfDeviceCode(Loc, DiagID)
1862                                  : CUDADiagIfHostCode(Loc, DiagID);
1863   SetIsLastErrorImmediate(DB.isImmediate());
1864   return DB;
1865 }
1866
1867 void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
1868   if (isUnevaluatedContext() || Ty.isNull())
1869     return;
1870
1871   // The original idea behind the checkTypeSupport function is that unused
1872   // declarations can be replaced with an array of bytes of the same size during
1873   // codegen. Such a replacement doesn't seem to be possible for types without
1874   // a constant byte size, such as zero-length arrays, so do a deep check for SYCL.
1875   if (D && LangOpts.SYCLIsDevice) {
1876     llvm::DenseSet<QualType> Visited;
1877     deepTypeCheckForSYCLDevice(Loc, Visited, D);
1878   }
1879
1880   Decl *C = cast<Decl>(getCurLexicalContext());
1881
1882   // Memcpy operations for structs containing a member with an unsupported type
1883   // are OK, though.
1884   if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
1885     if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
1886         MD->isTrivial())
1887       return;
1888
1889     if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
1890       if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
1891         return;
1892   }
1893
1894   // Try to associate errors with the lexical context if that is a function, or
1895   // with the value declaration otherwise.
1896   FunctionDecl *FD = isa<FunctionDecl>(C) ?
cast<FunctionDecl>(C) 1897 : dyn_cast_or_null<FunctionDecl>(D); 1898 1899 auto CheckDeviceType = [&](QualType Ty) { 1900 if (Ty->isDependentType()) 1901 return; 1902 1903 if (Ty->isBitIntType()) { 1904 if (!Context.getTargetInfo().hasBitIntType()) { 1905 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1906 if (D) 1907 PD << D; 1908 else 1909 PD << "expression"; 1910 targetDiag(Loc, PD, FD) 1911 << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/ 1912 << Ty << Context.getTargetInfo().getTriple().str(); 1913 } 1914 return; 1915 } 1916 1917 // Check if we are dealing with two 'long double' but with different 1918 // semantics. 1919 bool LongDoubleMismatched = false; 1920 if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) { 1921 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty); 1922 if ((&Sem != &llvm::APFloat::PPCDoubleDouble() && 1923 !Context.getTargetInfo().hasFloat128Type()) || 1924 (&Sem == &llvm::APFloat::PPCDoubleDouble() && 1925 !Context.getTargetInfo().hasIbm128Type())) 1926 LongDoubleMismatched = true; 1927 } 1928 1929 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) || 1930 (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) || 1931 (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) || 1932 (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 && 1933 !Context.getTargetInfo().hasInt128Type()) || 1934 LongDoubleMismatched) { 1935 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1936 if (D) 1937 PD << D; 1938 else 1939 PD << "expression"; 1940 1941 if (targetDiag(Loc, PD, FD) 1942 << true /*show bit size*/ 1943 << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty 1944 << false /*return*/ << Context.getTargetInfo().getTriple().str()) { 1945 if (D) 1946 D->setInvalidDecl(); 1947 } 1948 if (D) 1949 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 1950 } 1951 }; 1952 1953 auto CheckType = [&](QualType Ty, bool IsRetTy = false) { 1954 if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice) || 1955 LangOpts.CUDAIsDevice) 1956 CheckDeviceType(Ty); 1957 1958 QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType(); 1959 const TargetInfo &TI = Context.getTargetInfo(); 1960 if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) { 1961 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1962 if (D) 1963 PD << D; 1964 else 1965 PD << "expression"; 1966 1967 if (Diag(Loc, PD, FD) 1968 << false /*show bit size*/ << 0 << Ty << false /*return*/ 1969 << Context.getTargetInfo().getTriple().str()) { 1970 if (D) 1971 D->setInvalidDecl(); 1972 } 1973 if (D) 1974 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 1975 } 1976 1977 bool IsDouble = UnqualTy == Context.DoubleTy; 1978 bool IsFloat = UnqualTy == Context.FloatTy; 1979 if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) { 1980 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1981 if (D) 1982 PD << D; 1983 else 1984 PD << "expression"; 1985 1986 if (Diag(Loc, PD, FD) 1987 << false /*show bit size*/ << 0 << Ty << true /*return*/ 1988 << Context.getTargetInfo().getTriple().str()) { 1989 if (D) 1990 D->setInvalidDecl(); 1991 } 1992 if (D) 1993 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 1994 } 1995 }; 1996 1997 CheckType(Ty); 1998 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) { 1999 for (const auto &ParamTy : FPTy->param_types()) 2000 CheckType(ParamTy); 2001 CheckType(FPTy->getReturnType(), 
/*IsRetTy=*/true);
2002   }
2003   if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
2004     CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
2005 }
2006
2007 /// Looks through the macro-expansion chain for the given
2008 /// location, looking for a macro expansion with the given name.
2009 /// If one is found, returns true and sets the location to that
2010 /// expansion loc.
2011 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2012   SourceLocation loc = locref;
2013   if (!loc.isMacroID()) return false;
2014
2015   // There's no good way right now to look at the intermediate
2016   // expansions, so just jump to the expansion location.
2017   loc = getSourceManager().getExpansionLoc(loc);
2018
2019   // If that expansion is spelled with the given name, stop here.
2020   SmallString<16> buffer;
2021   if (getPreprocessor().getSpelling(loc, buffer) == name) {
2022     locref = loc;
2023     return true;
2024   }
2025   return false;
2026 }
2027
2028 /// Determines the active Scope associated with the given declaration
2029 /// context.
2030 ///
2031 /// This routine maps a declaration context to the active Scope object that
2032 /// represents that declaration context in the parser. It is typically used
2033 /// from "scope-less" code (e.g., template instantiation, lazy creation of
2034 /// declarations) that injects a name for name-lookup purposes and, therefore,
2035 /// must update the Scope.
2036 ///
2037 /// \returns The scope corresponding to the given declaration context, or NULL
2038 /// if no such scope is open.
2039 Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2040
2041   if (!Ctx)
2042     return nullptr;
2043
2044   Ctx = Ctx->getPrimaryContext();
2045   for (Scope *S = getCurScope(); S; S = S->getParent()) {
2046     // Ignore scopes that cannot have declarations. This is important for
2047     // out-of-line definitions of static class members.
2048     if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2049       if (DeclContext *Entity = S->getEntity())
2050         if (Ctx == Entity->getPrimaryContext())
2051           return S;
2052   }
2053
2054   return nullptr;
2055 }
2056
2057 /// Enter a new function scope.
2058 void Sema::PushFunctionScope() {
2059   if (FunctionScopes.empty() && CachedFunctionScope) {
2060     // Use CachedFunctionScope to avoid allocating memory when possible.
2061     CachedFunctionScope->Clear();
2062     FunctionScopes.push_back(CachedFunctionScope.release());
2063   } else {
2064     FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
2065   }
2066   if (LangOpts.OpenMP)
2067     pushOpenMPFunctionRegion();
2068 }
2069
2070 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2071   FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2072                                               BlockScope, Block));
2073 }
2074
2075 LambdaScopeInfo *Sema::PushLambdaScope() {
2076   LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2077   FunctionScopes.push_back(LSI);
2078   return LSI;
2079 }
2080
2081 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2082   if (LambdaScopeInfo *const LSI = getCurLambda()) {
2083     LSI->AutoTemplateParameterDepth = Depth;
2084     return;
2085   }
2086   llvm_unreachable(
2087       "Remove assertion if intentionally called in a non-lambda context.");
2088 }
2089
2090 // Check that the type of the VarDecl has an accessible copy constructor and
2091 // resolve its destructor's exception specification.
2092 // This also performs initialization of block variables when they are moved
2093 // to the heap.
It uses the same rules as applicable for implicit moves 2094 // according to the C++ standard in effect ([class.copy.elision]p3). 2095 static void checkEscapingByref(VarDecl *VD, Sema &S) { 2096 QualType T = VD->getType(); 2097 EnterExpressionEvaluationContext scope( 2098 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated); 2099 SourceLocation Loc = VD->getLocation(); 2100 Expr *VarRef = 2101 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc); 2102 ExprResult Result; 2103 auto IE = InitializedEntity::InitializeBlock(Loc, T); 2104 if (S.getLangOpts().CPlusPlus2b) { 2105 auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr, 2106 VK_XValue, FPOptionsOverride()); 2107 Result = S.PerformCopyInitialization(IE, SourceLocation(), E); 2108 } else { 2109 Result = S.PerformMoveOrCopyInitialization( 2110 IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible}, 2111 VarRef); 2112 } 2113 2114 if (!Result.isInvalid()) { 2115 Result = S.MaybeCreateExprWithCleanups(Result); 2116 Expr *Init = Result.getAs<Expr>(); 2117 S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init)); 2118 } 2119 2120 // The destructor's exception specification is needed when IRGen generates 2121 // block copy/destroy functions. Resolve it here. 2122 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) 2123 if (CXXDestructorDecl *DD = RD->getDestructor()) { 2124 auto *FPT = DD->getType()->getAs<FunctionProtoType>(); 2125 S.ResolveExceptionSpec(Loc, FPT); 2126 } 2127 } 2128 2129 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) { 2130 // Set the EscapingByref flag of __block variables captured by 2131 // escaping blocks. 2132 for (const BlockDecl *BD : FSI.Blocks) { 2133 for (const BlockDecl::Capture &BC : BD->captures()) { 2134 VarDecl *VD = BC.getVariable(); 2135 if (VD->hasAttr<BlocksAttr>()) { 2136 // Nothing to do if this is a __block variable captured by a 2137 // non-escaping block. 2138 if (BD->doesNotEscape()) 2139 continue; 2140 VD->setEscapingByref(); 2141 } 2142 // Check whether the captured variable is or contains an object of 2143 // non-trivial C union type. 2144 QualType CapType = BC.getVariable()->getType(); 2145 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() || 2146 CapType.hasNonTrivialToPrimitiveCopyCUnion()) 2147 S.checkNonTrivialCUnion(BC.getVariable()->getType(), 2148 BD->getCaretLocation(), 2149 Sema::NTCUC_BlockCapture, 2150 Sema::NTCUK_Destruct|Sema::NTCUK_Copy); 2151 } 2152 } 2153 2154 for (VarDecl *VD : FSI.ByrefBlockVars) { 2155 // __block variables might require us to capture a copy-initializer. 2156 if (!VD->isEscapingByref()) 2157 continue; 2158 // It's currently invalid to ever have a __block variable with an 2159 // array type; should we diagnose that here? 2160 // Regardless, we don't want to ignore array nesting when 2161 // constructing this copy. 2162 if (VD->getType()->isStructureOrClassType()) 2163 checkEscapingByref(VD, S); 2164 } 2165 } 2166 2167 /// Pop a function (or block or lambda or captured region) scope from the stack. 2168 /// 2169 /// \param WP The warning policy to use for CFG-based warnings, or null if such 2170 /// warnings should not be produced. 2171 /// \param D The declaration corresponding to this function scope, if producing 2172 /// CFG-based warnings. 2173 /// \param BlockType The type of the block expression, if D is a BlockDecl. 
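///
/// A minimal usage sketch (illustrative only; the surrounding names are
/// assumptions, not code from this file): from inside another Sema member,
/// a function body is typically bracketed as
/// \code
///   PushFunctionScope();
///   // ... act on the parameters and statements of the body ...
///   sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
///   PoppedFunctionScopePtr Scope = PopFunctionScopeInfo(&WP, FD, QualType());
/// \endcode
/// where \c FD is the FunctionDecl being finished; the returned smart pointer
/// either deletes the scope or stashes it in CachedFunctionScope for reuse.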
2174 Sema::PoppedFunctionScopePtr 2175 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, 2176 const Decl *D, QualType BlockType) { 2177 assert(!FunctionScopes.empty() && "mismatched push/pop!"); 2178 2179 markEscapingByrefs(*FunctionScopes.back(), *this); 2180 2181 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(), 2182 PoppedFunctionScopeDeleter(this)); 2183 2184 if (LangOpts.OpenMP) 2185 popOpenMPFunctionRegion(Scope.get()); 2186 2187 // Issue any analysis-based warnings. 2188 if (WP && D) 2189 AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType); 2190 else 2191 for (const auto &PUD : Scope->PossiblyUnreachableDiags) 2192 Diag(PUD.Loc, PUD.PD); 2193 2194 return Scope; 2195 } 2196 2197 void Sema::PoppedFunctionScopeDeleter:: 2198 operator()(sema::FunctionScopeInfo *Scope) const { 2199 // Stash the function scope for later reuse if it's for a normal function. 2200 if (Scope->isPlainFunction() && !Self->CachedFunctionScope) 2201 Self->CachedFunctionScope.reset(Scope); 2202 else 2203 delete Scope; 2204 } 2205 2206 void Sema::PushCompoundScope(bool IsStmtExpr) { 2207 getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr)); 2208 } 2209 2210 void Sema::PopCompoundScope() { 2211 FunctionScopeInfo *CurFunction = getCurFunction(); 2212 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop"); 2213 2214 CurFunction->CompoundScopes.pop_back(); 2215 } 2216 2217 /// Determine whether any errors occurred within this function/method/ 2218 /// block. 2219 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const { 2220 return getCurFunction()->hasUnrecoverableErrorOccurred(); 2221 } 2222 2223 void Sema::setFunctionHasBranchIntoScope() { 2224 if (!FunctionScopes.empty()) 2225 FunctionScopes.back()->setHasBranchIntoScope(); 2226 } 2227 2228 void Sema::setFunctionHasBranchProtectedScope() { 2229 if (!FunctionScopes.empty()) 2230 FunctionScopes.back()->setHasBranchProtectedScope(); 2231 } 2232 2233 void Sema::setFunctionHasIndirectGoto() { 2234 if (!FunctionScopes.empty()) 2235 FunctionScopes.back()->setHasIndirectGoto(); 2236 } 2237 2238 void Sema::setFunctionHasMustTail() { 2239 if (!FunctionScopes.empty()) 2240 FunctionScopes.back()->setHasMustTail(); 2241 } 2242 2243 BlockScopeInfo *Sema::getCurBlock() { 2244 if (FunctionScopes.empty()) 2245 return nullptr; 2246 2247 auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back()); 2248 if (CurBSI && CurBSI->TheDecl && 2249 !CurBSI->TheDecl->Encloses(CurContext)) { 2250 // We have switched contexts due to template instantiation. 2251 assert(!CodeSynthesisContexts.empty()); 2252 return nullptr; 2253 } 2254 2255 return CurBSI; 2256 } 2257 2258 FunctionScopeInfo *Sema::getEnclosingFunction() const { 2259 if (FunctionScopes.empty()) 2260 return nullptr; 2261 2262 for (int e = FunctionScopes.size() - 1; e >= 0; --e) { 2263 if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) 2264 continue; 2265 return FunctionScopes[e]; 2266 } 2267 return nullptr; 2268 } 2269 2270 LambdaScopeInfo *Sema::getEnclosingLambda() const { 2271 for (auto *Scope : llvm::reverse(FunctionScopes)) { 2272 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) { 2273 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) { 2274 // We have switched contexts due to template instantiation. 2275 // FIXME: We should swap out the FunctionScopes during code synthesis 2276 // so that we don't need to check for this. 
2277 assert(!CodeSynthesisContexts.empty()); 2278 return nullptr; 2279 } 2280 return LSI; 2281 } 2282 } 2283 return nullptr; 2284 } 2285 2286 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) { 2287 if (FunctionScopes.empty()) 2288 return nullptr; 2289 2290 auto I = FunctionScopes.rbegin(); 2291 if (IgnoreNonLambdaCapturingScope) { 2292 auto E = FunctionScopes.rend(); 2293 while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I)) 2294 ++I; 2295 if (I == E) 2296 return nullptr; 2297 } 2298 auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I); 2299 if (CurLSI && CurLSI->Lambda && 2300 !CurLSI->Lambda->Encloses(CurContext)) { 2301 // We have switched contexts due to template instantiation. 2302 assert(!CodeSynthesisContexts.empty()); 2303 return nullptr; 2304 } 2305 2306 return CurLSI; 2307 } 2308 2309 // We have a generic lambda if we parsed auto parameters, or we have 2310 // an associated template parameter list. 2311 LambdaScopeInfo *Sema::getCurGenericLambda() { 2312 if (LambdaScopeInfo *LSI = getCurLambda()) { 2313 return (LSI->TemplateParams.size() || 2314 LSI->GLTemplateParameterList) ? LSI : nullptr; 2315 } 2316 return nullptr; 2317 } 2318 2319 2320 void Sema::ActOnComment(SourceRange Comment) { 2321 if (!LangOpts.RetainCommentsFromSystemHeaders && 2322 SourceMgr.isInSystemHeader(Comment.getBegin())) 2323 return; 2324 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false); 2325 if (RC.isAlmostTrailingComment()) { 2326 SourceRange MagicMarkerRange(Comment.getBegin(), 2327 Comment.getBegin().getLocWithOffset(3)); 2328 StringRef MagicMarkerText; 2329 switch (RC.getKind()) { 2330 case RawComment::RCK_OrdinaryBCPL: 2331 MagicMarkerText = "///<"; 2332 break; 2333 case RawComment::RCK_OrdinaryC: 2334 MagicMarkerText = "/**<"; 2335 break; 2336 default: 2337 llvm_unreachable("if this is an almost Doxygen comment, " 2338 "it should be ordinary"); 2339 } 2340 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) << 2341 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText); 2342 } 2343 Context.addComment(RC); 2344 } 2345 2346 // Pin this vtable to this file. 2347 ExternalSemaSource::~ExternalSemaSource() {} 2348 char ExternalSemaSource::ID; 2349 2350 void ExternalSemaSource::ReadMethodPool(Selector Sel) { } 2351 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { } 2352 2353 void ExternalSemaSource::ReadKnownNamespaces( 2354 SmallVectorImpl<NamespaceDecl *> &Namespaces) { 2355 } 2356 2357 void ExternalSemaSource::ReadUndefinedButUsed( 2358 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {} 2359 2360 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector< 2361 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {} 2362 2363 /// Figure out if an expression could be turned into a call. 2364 /// 2365 /// Use this when trying to recover from an error where the programmer may have 2366 /// written just the name of a function instead of actually calling it. 2367 /// 2368 /// \param E - The expression to examine. 2369 /// \param ZeroArgCallReturnTy - If the expression can be turned into a call 2370 /// with no arguments, this parameter is set to the type returned by such a 2371 /// call; otherwise, it is set to an empty QualType. 2372 /// \param OverloadSet - If the expression is an overloaded function 2373 /// name, this parameter is populated with the decls of the various overloads. 
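///
/// For illustration (a hedged sketch; the names below are made up and do not
/// come from this file): given
/// \code
///   int answer();
///   int x = answer;   // error: the call parentheses were forgotten
/// \endcode
/// calling tryExprAsCall on the DeclRefExpr naming \c answer returns true and
/// sets \c ZeroArgCallReturnTy to \c int, which lets the caller suggest
/// appending "()".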
2374 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, 2375 UnresolvedSetImpl &OverloadSet) { 2376 ZeroArgCallReturnTy = QualType(); 2377 OverloadSet.clear(); 2378 2379 const OverloadExpr *Overloads = nullptr; 2380 bool IsMemExpr = false; 2381 if (E.getType() == Context.OverloadTy) { 2382 OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E)); 2383 2384 // Ignore overloads that are pointer-to-member constants. 2385 if (FR.HasFormOfMemberPointer) 2386 return false; 2387 2388 Overloads = FR.Expression; 2389 } else if (E.getType() == Context.BoundMemberTy) { 2390 Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens()); 2391 IsMemExpr = true; 2392 } 2393 2394 bool Ambiguous = false; 2395 bool IsMV = false; 2396 2397 if (Overloads) { 2398 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), 2399 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { 2400 OverloadSet.addDecl(*it); 2401 2402 // Check whether the function is a non-template, non-member which takes no 2403 // arguments. 2404 if (IsMemExpr) 2405 continue; 2406 if (const FunctionDecl *OverloadDecl 2407 = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { 2408 if (OverloadDecl->getMinRequiredArguments() == 0) { 2409 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous && 2410 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() || 2411 OverloadDecl->isCPUSpecificMultiVersion()))) { 2412 ZeroArgCallReturnTy = QualType(); 2413 Ambiguous = true; 2414 } else { 2415 ZeroArgCallReturnTy = OverloadDecl->getReturnType(); 2416 IsMV = OverloadDecl->isCPUDispatchMultiVersion() || 2417 OverloadDecl->isCPUSpecificMultiVersion(); 2418 } 2419 } 2420 } 2421 } 2422 2423 // If it's not a member, use better machinery to try to resolve the call 2424 if (!IsMemExpr) 2425 return !ZeroArgCallReturnTy.isNull(); 2426 } 2427 2428 // Attempt to call the member with no arguments - this will correctly handle 2429 // member templates with defaults/deduction of template arguments, overloads 2430 // with default arguments, etc. 2431 if (IsMemExpr && !E.isTypeDependent()) { 2432 Sema::TentativeAnalysisScope Trap(*this); 2433 ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), 2434 None, SourceLocation()); 2435 if (R.isUsable()) { 2436 ZeroArgCallReturnTy = R.get()->getType(); 2437 return true; 2438 } 2439 return false; 2440 } 2441 2442 if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { 2443 if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { 2444 if (Fun->getMinRequiredArguments() == 0) 2445 ZeroArgCallReturnTy = Fun->getReturnType(); 2446 return true; 2447 } 2448 } 2449 2450 // We don't have an expression that's convenient to get a FunctionDecl from, 2451 // but we can at least check if the type is "function of 0 arguments". 2452 QualType ExprTy = E.getType(); 2453 const FunctionType *FunTy = nullptr; 2454 QualType PointeeTy = ExprTy->getPointeeType(); 2455 if (!PointeeTy.isNull()) 2456 FunTy = PointeeTy->getAs<FunctionType>(); 2457 if (!FunTy) 2458 FunTy = ExprTy->getAs<FunctionType>(); 2459 2460 if (const FunctionProtoType *FPT = 2461 dyn_cast_or_null<FunctionProtoType>(FunTy)) { 2462 if (FPT->getNumParams() == 0) 2463 ZeroArgCallReturnTy = FunTy->getReturnType(); 2464 return true; 2465 } 2466 return false; 2467 } 2468 2469 /// Give notes for a set of overloads. 2470 /// 2471 /// A companion to tryExprAsCall. 
In cases when the name that the programmer 2472 /// wrote was an overloaded function, we may be able to make some guesses about 2473 /// plausible overloads based on their return types; such guesses can be handed 2474 /// off to this method to be emitted as notes. 2475 /// 2476 /// \param Overloads - The overloads to note. 2477 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to 2478 /// -fshow-overloads=best, this is the location to attach to the note about too 2479 /// many candidates. Typically this will be the location of the original 2480 /// ill-formed expression. 2481 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads, 2482 const SourceLocation FinalNoteLoc) { 2483 unsigned ShownOverloads = 0; 2484 unsigned SuppressedOverloads = 0; 2485 for (UnresolvedSetImpl::iterator It = Overloads.begin(), 2486 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2487 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) { 2488 ++SuppressedOverloads; 2489 continue; 2490 } 2491 2492 NamedDecl *Fn = (*It)->getUnderlyingDecl(); 2493 // Don't print overloads for non-default multiversioned functions. 2494 if (const auto *FD = Fn->getAsFunction()) { 2495 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() && 2496 !FD->getAttr<TargetAttr>()->isDefaultVersion()) 2497 continue; 2498 } 2499 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call); 2500 ++ShownOverloads; 2501 } 2502 2503 S.Diags.overloadCandidatesShown(ShownOverloads); 2504 2505 if (SuppressedOverloads) 2506 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates) 2507 << SuppressedOverloads; 2508 } 2509 2510 static void notePlausibleOverloads(Sema &S, SourceLocation Loc, 2511 const UnresolvedSetImpl &Overloads, 2512 bool (*IsPlausibleResult)(QualType)) { 2513 if (!IsPlausibleResult) 2514 return noteOverloads(S, Overloads, Loc); 2515 2516 UnresolvedSet<2> PlausibleOverloads; 2517 for (OverloadExpr::decls_iterator It = Overloads.begin(), 2518 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2519 const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It); 2520 QualType OverloadResultTy = OverloadDecl->getReturnType(); 2521 if (IsPlausibleResult(OverloadResultTy)) 2522 PlausibleOverloads.addDecl(It.getDecl()); 2523 } 2524 noteOverloads(S, PlausibleOverloads, Loc); 2525 } 2526 2527 /// Determine whether the given expression can be called by just 2528 /// putting parentheses after it. Notably, expressions with unary 2529 /// operators can't be because the unary operator will start parsing 2530 /// outside the call. 
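///
/// For example (an illustrative sketch; \c fptr is a made-up name): for the
/// expression \c *fptr, appending parentheses produces \c *fptr(), where the
/// call binds to \c fptr rather than to the whole dereference, so this
/// function returns false for unary-operator expressions; for a plain
/// DeclRefExpr such as \c fptr it returns true and the caller can offer a
/// "()" fix-it.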
2531 static bool IsCallableWithAppend(Expr *E) { 2532 E = E->IgnoreImplicit(); 2533 return (!isa<CStyleCastExpr>(E) && 2534 !isa<UnaryOperator>(E) && 2535 !isa<BinaryOperator>(E) && 2536 !isa<CXXOperatorCallExpr>(E)); 2537 } 2538 2539 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) { 2540 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 2541 E = UO->getSubExpr(); 2542 2543 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { 2544 if (ULE->getNumDecls() == 0) 2545 return false; 2546 2547 const NamedDecl *ND = *ULE->decls_begin(); 2548 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2549 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion(); 2550 } 2551 return false; 2552 } 2553 2554 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, 2555 bool ForceComplain, 2556 bool (*IsPlausibleResult)(QualType)) { 2557 SourceLocation Loc = E.get()->getExprLoc(); 2558 SourceRange Range = E.get()->getSourceRange(); 2559 UnresolvedSet<4> Overloads; 2560 2561 // If this is a SFINAE context, don't try anything that might trigger ADL 2562 // prematurely. 2563 if (!isSFINAEContext()) { 2564 QualType ZeroArgCallTy; 2565 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) && 2566 !ZeroArgCallTy.isNull() && 2567 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) { 2568 // At this point, we know E is potentially callable with 0 2569 // arguments and that it returns something of a reasonable type, 2570 // so we can emit a fixit and carry on pretending that E was 2571 // actually a CallExpr. 2572 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd()); 2573 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2574 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range 2575 << (IsCallableWithAppend(E.get()) 2576 ? FixItHint::CreateInsertion(ParenInsertionLoc, 2577 "()") 2578 : FixItHint()); 2579 if (!IsMV) 2580 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2581 2582 // FIXME: Try this before emitting the fixit, and suppress diagnostics 2583 // while doing so. 2584 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None, 2585 Range.getEnd().getLocWithOffset(1)); 2586 return true; 2587 } 2588 } 2589 if (!ForceComplain) return false; 2590 2591 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2592 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range; 2593 if (!IsMV) 2594 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2595 E = ExprError(); 2596 return true; 2597 } 2598 2599 IdentifierInfo *Sema::getSuperIdentifier() const { 2600 if (!Ident_super) 2601 Ident_super = &Context.Idents.get("super"); 2602 return Ident_super; 2603 } 2604 2605 IdentifierInfo *Sema::getFloat128Identifier() const { 2606 if (!Ident___float128) 2607 Ident___float128 = &Context.Idents.get("__float128"); 2608 return Ident___float128; 2609 } 2610 2611 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, 2612 CapturedRegionKind K, 2613 unsigned OpenMPCaptureLevel) { 2614 auto *CSI = new CapturedRegionScopeInfo( 2615 getDiagnostics(), S, CD, RD, CD->getContextParam(), K, 2616 (getLangOpts().OpenMP && K == CR_OpenMP) ? 
getOpenMPNestingLevel() : 0, 2617 OpenMPCaptureLevel); 2618 CSI->ReturnType = Context.VoidTy; 2619 FunctionScopes.push_back(CSI); 2620 } 2621 2622 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { 2623 if (FunctionScopes.empty()) 2624 return nullptr; 2625 2626 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); 2627 } 2628 2629 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & 2630 Sema::getMismatchingDeleteExpressions() const { 2631 return DeleteExprs; 2632 } 2633