//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the actions class which performs semantic analysis and
// builds an AST out of a parse stream.
//
//===----------------------------------------------------------------------===//

#include "UsedDeclVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/TimeProfiler.h"

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
  return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
}

ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }

IdentifierInfo *
Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                                 unsigned int Index) {
  std::string InventedName;
  llvm::raw_string_ostream OS(InventedName);

  if (!ParamName)
    OS << "auto:" << Index + 1;
  else
    OS << ParamName->getName() << ":auto";

  OS.flush();
  return &Context.Idents.get(OS.str());
}

PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
                                       const Preprocessor &PP) {
  PrintingPolicy Policy = Context.getPrintingPolicy();
  // In diagnostics, we print _Bool as bool if the latter is defined as the
  // former.
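  // (Illustrative note: the typical case handled below is a C header that
  // provides '#define bool _Bool'; when that macro is present, diagnostics
  // read 'bool' instead of '_Bool'.)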
  Policy.Bool = Context.getLangOpts().Bool;
  if (!Policy.Bool) {
    if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
      Policy.Bool = BoolMacro->isObjectLike() &&
                    BoolMacro->getNumTokens() == 1 &&
                    BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
    }
  }

  return Policy;
}

void Sema::ActOnTranslationUnitScope(Scope *S) {
  TUScope = S;
  PushDeclContext(S, Context.getTranslationUnitDecl());
}

namespace clang {
namespace sema {

class SemaPPCallbacks : public PPCallbacks {
  Sema *S = nullptr;
  llvm::SmallVector<SourceLocation, 8> IncludeStack;

public:
  void set(Sema &S) { this->S = &S; }

  void reset() { S = nullptr; }

  virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                           SrcMgr::CharacteristicKind FileType,
                           FileID PrevFID) override {
    if (!S)
      return;
    switch (Reason) {
    case EnterFile: {
      SourceManager &SM = S->getSourceManager();
      SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
      if (IncludeLoc.isValid()) {
        if (llvm::timeTraceProfilerEnabled()) {
          const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc));
          llvm::timeTraceProfilerBegin(
              "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>"));
        }

        IncludeStack.push_back(IncludeLoc);
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
            IncludeLoc);
      }
      break;
    }
    case ExitFile:
      if (!IncludeStack.empty()) {
        if (llvm::timeTraceProfilerEnabled())
          llvm::timeTraceProfilerEnd();

        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
            IncludeStack.pop_back_val());
      }
      break;
    default:
      break;
    }
  }
};

} // end namespace sema
} // end namespace clang

const unsigned Sema::MaxAlignmentExponent;
const unsigned Sema::MaximumAlignment;

Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : ExternalSource(nullptr), isMultiplexExternalSource(false),
      CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
      Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
      SourceMgr(PP.getSourceManager()), CollectStats(false),
      CodeCompleter(CodeCompleter), CurContext(nullptr),
      OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()),
      CurInitSeg(nullptr), VisContext(nullptr),
      PragmaAttributeCurrentTargetDecl(nullptr),
      IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
      LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
      StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
      StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
      MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr),
      NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
      ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
      ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
      DictionaryWithObjectsMethod(nullptr),
      GlobalNewDeleteDeclared(false),
      TUKind(TUKind), NumSFINAEErrors(0),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      SatisfactionCache(Context), AccessCheckingSFINAE(false),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
      DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
      ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
      CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
  TUScope = nullptr;
  isConstantEvaluatedOverride = false;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    NSAPIObj.reset(new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);

  ExprEvalContexts.emplace_back(
      ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
      nullptr, ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  InitDataSharingAttributesStack();

  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);
}

// Anchor Sema's type info to this TU.
void Sema::anchor() {}

void Sema::addImplicitTypedef(StringRef Name, QualType T) {
  DeclarationName DN = &Context.Idents.get(Name);
  if (IdResolver.begin(DN) == IdResolver.end())
    PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
}

void Sema::Initialize() {
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->InitializeSema(*this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->InitializeSema(*this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }

  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
                        TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    getOpenCLOptions().enableSupportedCore(getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    if (getLangOpts().OpenCLCPlusPlus || getLangOpts().OpenCLVersion >= 200) {
      addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
      addImplicitTypedef("queue_t", Context.OCLQueueTy);
      addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      auto AtomicLongT = Context.getAtomicType(Context.LongTy);
      addImplicitTypedef("atomic_long", AtomicLongT);
      auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
      addImplicitTypedef("atomic_ulong", AtomicULongT);
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
      addImplicitTypedef("atomic_double", AtomicDoubleT);
      // OpenCL C v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // a 32-bit integer, and per OpenCL C v2.0, s6.1.1, int is always 32 bits
      // wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
      auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
      addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
      auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
      addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
      auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
      addImplicitTypedef("atomic_size_t", AtomicSizeT);
      auto AtomicPtrDiffT =
          Context.getAtomicType(Context.getPointerDiffType());
      addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);

      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      std::vector<QualType> Atomic64BitTypes;
      Atomic64BitTypes.push_back(AtomicLongT);
      Atomic64BitTypes.push_back(AtomicULongT);
      Atomic64BitTypes.push_back(AtomicDoubleT);
      if (Context.getTypeSize(AtomicSizeT) == 64) {
        Atomic64BitTypes.push_back(AtomicSizeT);
        Atomic64BitTypes.push_back(AtomicIntPtrT);
        Atomic64BitTypes.push_back(AtomicUIntPtrT);
        Atomic64BitTypes.push_back(AtomicPtrDiffT);
      }
      for (auto &I : Atomic64BitTypes)
        setOpenCLExtensionForType(
            I, "cl_khr_int64_base_atomics cl_khr_int64_extended_atomics");

      setOpenCLExtensionForType(AtomicDoubleT, "cl_khr_fp64");
    }

    setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");

#define GENERIC_IMAGE_TYPE_EXT(Type, Id, Ext)                                  \
  setOpenCLExtensionForType(Context.Id, Ext);
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  addImplicitTypedef(#ExtType, Context.Id##Ty);                                \
  setOpenCLExtensionForType(Context.Id##Ty, #Ext);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Context.getTargetInfo().hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Context.getTargetInfo().getTriple().isPPC64() &&
      Context.getTargetInfo().hasFeature("paired-vector-memops")) {
    if (Context.getTargetInfo().hasFeature("mma")) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
    }
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
    if (IdResolver.begin(MSVaList) == IdResolver.end())
      PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
  if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}

Sema::~Sema() {
  assert(InstantiatingSpecializations.empty()
         && "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // If Sema's ExternalSource is the multiplexer - we own it.
  if (isMultiplexExternalSource)
    delete ExternalSource;

  // Delete cached satisfactions.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}

void Sema::warnStackExhausted(SourceLocation Loc) {
  // Only warn about this once.
  if (!WarnedStackExhausted) {
    Diag(Loc, diag::warn_stack_exhausted);
    WarnedStackExhausted = true;
  }
}

void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}

/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
                                         UnavailableAttr::ImplicitReason reason) {
  // If we're not in a function, it's an error.
  FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
  if (!fn) return false;

  // If we're in template instantiation, it's an error.
  if (inTemplateInstantiation())
    return false;

  // If that function's not in a system header, it's an error.
  if (!Context.getSourceManager().isInSystemHeader(loc))
    return false;

  // If the function is already unavailable, it's not an error.
  if (fn->hasAttr<UnavailableAttr>()) return true;

  fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
  return true;
}

ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void Sema::addExternalSource(ExternalSemaSource *E) {
  assert(E && "Cannot use with NULL ptr");

  if (!ExternalSource) {
    ExternalSource = E;
    return;
  }

  if (isMultiplexExternalSource)
    static_cast<MultiplexExternalSemaSource *>(ExternalSource)->addSource(*E);
  else {
    ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
    isMultiplexExternalSource = true;
  }
}

/// Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
  llvm::errs() << "\n*** Semantic Analysis Stats:\n";
  llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";

  BumpAlloc.PrintStats();
  AnalysisWarnings.PrintStats();
}

void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
                                               QualType SrcType,
                                               SourceLocation Loc) {
  Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context);
  if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
                           *ExprNullability != NullabilityKind::NullableResult))
    return;

  Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context);
  if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
    return;

  Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}

void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
  if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
                      E->getBeginLoc()))
    return;
  // nullptr only exists from C++11 on, so don't warn on its absence earlier.
  if (!getLangOpts().CPlusPlus11)
    return;

  if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    return;
  if (E->IgnoreParenImpCasts()->getType()->isNullPtrType())
    return;

  // Don't diagnose the conversion from a 0 literal to a null pointer argument
  // in a synthesized call to operator<=>.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.back().Kind ==
          CodeSynthesisContext::RewritingOperatorAsSpaceship)
    return;

  // If it is a macro from a system header, and if the macro name is not
  // "NULL", do not warn.
  SourceLocation MaybeMacroLoc = E->getBeginLoc();
  if (Diags.getSuppressSystemWarnings() &&
      SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
      !findMacroSpelling(MaybeMacroLoc, "NULL"))
    return;

  Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
      << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  if (VK == VK_RValue && !E->isRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(("can't implicitly cast lvalue to rvalue with this cast "
                        "kind: " +
                        std::string(CastExpr::getCastKindName(Kind)))
                           .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
      break;
    }
  }
  assert((VK == VK_RValue || Kind == CK_Dependent || !E->isRValue()) &&
         "can't cast rvalue to lvalue");
#endif

  diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);

  QualType ExprTy = Context.getCanonicalType(E->getType());
  QualType TypeTy = Context.getCanonicalType(Ty);

  if (ExprTy == TypeTy)
    return E;

  // C++1z [conv.array]: The temporary materialization conversion is applied.
  // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
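  // (Illustrative example: decaying a prvalue array, such as the array member
  // of a temporary in 'S().arr', first materializes the temporary so that the
  // resulting pointer has an object to point into.)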
  if (Kind == CK_ArrayToPointerDecay && getLangOpts().CPlusPlus &&
      E->getValueKind() == VK_RValue) {
    // The temporary is an lvalue in C++98 and an xvalue otherwise.
    ExprResult Materialized = CreateMaterializeTemporaryExpr(
        E->getType(), E, !getLangOpts().CPlusPlus11);
    if (Materialized.isInvalid())
      return ExprError();
    E = Materialized.get();
  }

  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
                                  CurFPFeatureOverrides());
}

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
  switch (ScalarTy->getScalarTypeKind()) {
  case Type::STK_Bool: return CK_NoOp;
  case Type::STK_CPointer: return CK_PointerToBoolean;
  case Type::STK_BlockPointer: return CK_PointerToBoolean;
  case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
  case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
  case Type::STK_Integral: return CK_IntegralToBoolean;
  case Type::STK_Floating: return CK_FloatingToBoolean;
  case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
  case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
  case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
  }
  llvm_unreachable("unknown scalar type kind");
}

/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
  if (D->getMostRecentDecl()->isUsed())
    return true;

  if (D->isExternallyVisible())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // If this is a function template and none of its specializations is used,
    // we should warn.
    if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become a definition, so check again.
    const FunctionDecl *DeclToCheck;
    if (FD->hasBody(DeclToCheck))
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = FD->getMostRecentDecl();
    if (DeclToCheck != FD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // If a variable usable in constant expressions is referenced,
    // don't warn if it isn't used: if the value of a variable is required
    // for the computation of a constant expression, it doesn't make sense to
    // warn even if the variable isn't odr-used. (isReferenced doesn't
    // precisely reflect that, but it's a decent approximation.)
    if (VD->isReferenced() &&
        VD->mightBeUsableInConstantExpressions(SemaRef->Context))
      return true;

    if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
      // If this is a variable template and none of its specializations is
      // used, we should warn.
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become a definition, so check again.
    const VarDecl *DeclToCheck = VD->getDefinition();
    if (DeclToCheck)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = VD->getMostRecentDecl();
    if (DeclToCheck != VD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  return false;
}

static bool isFunctionOrVarDeclExternC(NamedDecl *ND) {
  if (auto *FD = dyn_cast<FunctionDecl>(ND))
    return FD->isExternC();
  return cast<VarDecl>(ND)->isExternC();
}

/// Determine whether ND is an external-linkage function or variable whose
/// type has no linkage.
bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) {
  // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
  // because we also want to catch the case where its type has VisibleNoLinkage,
  // which does not affect the linkage of VD.
  return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
         !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
         !isFunctionOrVarDeclExternC(VD);
}

/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
void Sema::getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation>> &Undefined) {
  for (const auto &UndefinedUse : UndefinedButUsed) {
    NamedDecl *ND = UndefinedUse.first;

    // Ignore attributes that have become invalid.
    if (ND->isInvalidDecl()) continue;

    // __attribute__((weakref)) is basically a definition.
    if (ND->hasAttr<WeakRefAttr>()) continue;

    if (isa<CXXDeductionGuideDecl>(ND))
      continue;

    if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
      // An exported function will always be emitted when defined, so even if
      // the function is inline, it doesn't have to be emitted in this TU. An
      // imported function implies that it has been exported somewhere else.
      continue;
    }

    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
      if (FD->isDefined())
        continue;
      if (FD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(FD) &&
          !FD->getMostRecentDecl()->isInlined() &&
          !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;
      if (FD->getBuiltinID())
        continue;
    } else {
      auto *VD = cast<VarDecl>(ND);
      if (VD->hasDefinition() != VarDecl::DeclarationOnly)
        continue;
      if (VD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD) &&
          !VD->getMostRecentDecl()->isInline() &&
          !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;

      // Skip VarDecls that lack formal definitions but which we know are in
      // fact defined somewhere.
      if (VD->isKnownToBeDefined())
        continue;
    }

    Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
  }
}

/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  if (Undefined.empty()) return;

  for (auto Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
                                    ? diag::ext_undefined_internal_type
                                    : diag::err_undefined_internal_type)
          << isa<VarDecl>(VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      S.Diag(VD->getLocation(), diag::warn_undefined_internal)
          << isa<VarDecl>(VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
    } else {
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
    }
    if (UseLoc.isValid())
      S.Diag(UseLoc, diag::note_used_here);
  }

  S.UndefinedButUsed.clear();
}

void Sema::LoadExternalWeakUndeclaredIdentifiers() {
  if (!ExternalSource)
    return;

  SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
  ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
  for (auto &WeakID : WeakIDs)
    WeakUndeclaredIdentifiers.insert(WeakID);
}

typedef llvm::DenseMap<const CXXRecordDecl *, bool> RecordCompleteMap;

/// Returns true if all methods and nested classes of the given
/// CXXRecordDecl are defined in this translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
                                            RecordCompleteMap &MNCComplete) {
  RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
  if (Cache != MNCComplete.end())
    return Cache->second;
  if (!RD->isCompleteDefinition())
    return false;
  bool Complete = true;
  for (DeclContext::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
       I != E && Complete; ++I) {
    if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
      Complete = M->isDefined() || M->isDefaulted() ||
                 (M->isPure() && !isa<CXXDestructorDecl>(M));
    else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
      // If the template function is marked as late template parsed at this
      // point, it has not been instantiated and therefore we have not
      // performed semantic analysis on it yet, so we cannot know if the type
      // can be considered complete.
      Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
                 F->getTemplatedDecl()->isDefined();
    else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
      if (R->isInjectedClassName())
        continue;
      if (R->hasDefinition())
        Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
                                                   MNCComplete);
      else
        Complete = false;
    }
  }
  MNCComplete[RD] = Complete;
  return Complete;
}

/// Returns true if the given CXXRecordDecl is fully defined in this
/// translation unit, i.e. all methods are defined or pure virtual and all
/// friends, friend functions and nested classes are fully defined in this
/// translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
                                 RecordCompleteMap &RecordsComplete,
                                 RecordCompleteMap &MNCComplete) {
  RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
  if (Cache != RecordsComplete.end())
    return Cache->second;
  bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
  for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
                                      E = RD->friend_end();
       I != E && Complete; ++I) {
    // Check if friend classes and methods are complete.
    if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
      // Friend classes are available as the TypeSourceInfo of the FriendDecl.
      if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
        Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
      else
        Complete = false;
    } else {
      // Friend functions are available through the NamedDecl of FriendDecl.
      if (const FunctionDecl *FD =
              dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
        Complete = FD->isDefined();
      else
        // This is a template friend, give up.
        Complete = false;
    }
  }
  RecordsComplete[RD] = Complete;
  return Complete;
}

void Sema::emitAndClearUnusedLocalTypedefWarnings() {
  if (ExternalSource)
    ExternalSource->ReadUnusedLocalTypedefNameCandidates(
        UnusedLocalTypedefNameCandidates);
  for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
    if (TD->isReferenced())
      continue;
    Diag(TD->getLocation(), diag::warn_unused_local_typedef)
        << isa<TypeAliasDecl>(TD) << TD->getDeclName();
  }
  UnusedLocalTypedefNameCandidates.clear();
}

/// This is called before the very first declaration in the translation unit
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
  if (getLangOpts().ModulesTS &&
      (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface ||
       getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
    // We start in an implied global module fragment.
    SourceLocation StartOfTU =
        SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
    ActOnGlobalModuleFragmentDecl(StartOfTU);
    ModuleScopes.back().ImplicitGlobalModuleFragment = true;
  }
}

void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  // No explicit actions are required at the end of the global module fragment.
  if (Kind == TUFragmentKind::Global)
    return;

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list.
  // During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(PendingInstantiations.end(),
                               LateParsedInstantiations.begin(),
                               LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefinedUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(PendingInstantiations.begin(),
                                 Pending.begin(), Pending.end());
  }

  {
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");

  // Report diagnostics for uncorrected delayed typos. Ideally all of them
  // should have been corrected by that time, but it is very hard to cover all
  // cases in practice.
  for (const auto &Typo : DelayedTypos) {
    // We pass an empty TypoCorrection to indicate no correction was performed.
    Typo.second.DiagHandler(TypoCorrection());
  }
  DelayedTypos.clear();
}

/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
  assert(DelayedDiagnostics.getCurrentPool() == nullptr &&
         "reached end of translation unit with a pool attached?");

  // If code completion is enabled, don't perform any end-of-translation-unit
  // work.
  if (PP.isCodeCompletionEnabled())
    return;

  // Complete translation units and modules define vtables and perform implicit
  // instantiations. PCH files do not.
  if (TUKind != TU_Prefix) {
    DiagnoseUseOfUnimplementedSelectors();

    ActOnEndOfTranslationUnitFragment(
        !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
                                     Module::PrivateModuleFragment
            ?
            TUFragmentKind::Private
            : TUFragmentKind::Normal);

    if (LateTemplateParserCleanup)
      LateTemplateParserCleanup(OpaqueParser);

    CheckDelayedMemberExceptionSpecs();
  } else {
    // If we are building a TU prefix for serialization, it is safe to transfer
    // these over, even though they are not parsed. The end of the TU should be
    // outside of any eager template instantiation scope, so when this AST is
    // deserialized, these templates will not be parsed until the end of the
    // combined TU.
    PendingInstantiations.insert(PendingInstantiations.end(),
                                 LateParsedInstantiations.begin(),
                                 LateParsedInstantiations.end());
    LateParsedInstantiations.clear();

    if (LangOpts.PCHInstantiateTemplates) {
      llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
      PerformPendingInstantiations();
    }
  }

  DiagnoseUnterminatedPragmaAlignPack();
  DiagnoseUnterminatedPragmaAttribute();

  // All delayed member exception specs should be checked or we end up accepting
  // incompatible declarations.
  assert(DelayedOverridingExceptionSpecChecks.empty());
  assert(DelayedEquivalentExceptionSpecChecks.empty());

  // All dllexport classes should have been processed already.
  assert(DelayedDllExportClasses.empty());
  assert(DelayedDllExportMemberFunctions.empty());

  // Remove file scoped decls that turned out to be used.
  UnusedFileScopedDecls.erase(
      std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
                     UnusedFileScopedDecls.end(),
                     [this](const DeclaratorDecl *DD) {
                       return ShouldRemoveFromUnused(this, DD);
                     }),
      UnusedFileScopedDecls.end());

  if (TUKind == TU_Prefix) {
    // Translation unit prefixes don't need any of the checking below.
    if (!PP.isIncrementalProcessingEnabled())
      TUScope = nullptr;
    return;
  }

  // Check for #pragma weak identifiers that were never declared
  LoadExternalWeakUndeclaredIdentifiers();
  for (auto WeakID : WeakUndeclaredIdentifiers) {
    if (WeakID.second.getUsed())
      continue;

    Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(),
                                      LookupOrdinaryName);
    if (PrevDecl != nullptr &&
        !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
      Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type)
          << "'weak'" << ExpectedVariableOrFunction;
    else
      Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared)
          << WeakID.first;
  }

  if (LangOpts.CPlusPlus11 &&
      !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
    CheckDelegatingCtorCycles();

  if (!Diags.hasErrorOccurred()) {
    if (ExternalSource)
      ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
    checkUndefinedButUsed(*this);
  }

  // A global-module-fragment is only permitted within a module unit.
  bool DiagnosedMissingModuleDeclaration = false;
  if (!ModuleScopes.empty() &&
      ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment &&
      !ModuleScopes.back().ImplicitGlobalModuleFragment) {
    Diag(ModuleScopes.back().BeginLoc,
         diag::err_module_declaration_missing_after_global_module_introducer);
    DiagnosedMissingModuleDeclaration = true;
  }

  if (TUKind == TU_Module) {
    // If we are building a module interface unit, we need to have seen the
    // module declaration by now.
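    // (Illustrative example: such a unit is expected to contain a
    // module-declaration like 'export module M;'; if none was seen by this
    // point, the diagnostic below fires.)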
    if (getLangOpts().getCompilingModule() ==
            LangOptions::CMK_ModuleInterface &&
        (ModuleScopes.empty() ||
         !ModuleScopes.back().Module->isModulePurview()) &&
        !DiagnosedMissingModuleDeclaration) {
      // FIXME: Make a better guess as to where to put the module declaration.
      Diag(getSourceManager().getLocForStartOfFile(
               getSourceManager().getMainFileID()),
           diag::err_module_declaration_missing);
    }

    // If we are building a module, resolve all of the exported declarations
    // now.
    if (Module *CurrentModule = PP.getCurrentModule()) {
      ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();

      SmallVector<Module *, 2> Stack;
      Stack.push_back(CurrentModule);
      while (!Stack.empty()) {
        Module *Mod = Stack.pop_back_val();

        // Resolve the exported declarations and conflicts.
        // FIXME: Actually complain, once we figure out how to teach the
        // diagnostic client to deal with complaints in the module map at this
        // point.
        ModMap.resolveExports(Mod, /*Complain=*/false);
        ModMap.resolveUses(Mod, /*Complain=*/false);
        ModMap.resolveConflicts(Mod, /*Complain=*/false);

        // Queue the submodules, so their exports will also be resolved.
        Stack.append(Mod->submodule_begin(), Mod->submodule_end());
      }
    }

    // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
    // modules when they are built, not every time they are used.
    emitAndClearUnusedLocalTypedefWarnings();
  }

  // C99 6.9.2p2:
  //   A declaration of an identifier for an object that has file
  //   scope without an initializer, and without a storage-class
  //   specifier or with the storage-class specifier static,
  //   constitutes a tentative definition. If a translation unit
  //   contains one or more tentative definitions for an identifier,
  //   and the translation unit contains no external definition for
  //   that identifier, then the behavior is exactly as if the
  //   translation unit contains a file scope declaration of that
  //   identifier, with the composite type as of the end of the
  //   translation unit, with an initializer equal to 0.
  llvm::SmallSet<VarDecl *, 32> Seen;
  for (TentativeDefinitionsType::iterator
           T = TentativeDefinitions.begin(ExternalSource),
           TEnd = TentativeDefinitions.end();
       T != TEnd; ++T) {
    VarDecl *VD = (*T)->getActingDefinition();

    // If the tentative definition was completed, getActingDefinition() returns
    // null. If we've already seen this variable before, insert()'s second
    // return value is false.
    if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
      continue;

    if (const IncompleteArrayType *ArrayT
          = Context.getAsIncompleteArrayType(VD->getType())) {
      // Set the length of the array to 1 (C99 6.9.2p5).
      Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
      llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
      QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One,
                                                nullptr, ArrayType::Normal, 0);
      VD->setType(T);
    } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
                                   diag::err_tentative_def_incomplete_type))
      VD->setInvalidDecl();

    // No initialization is performed for a tentative definition.
    CheckCompleteVariableDeclaration(VD);

    // Notify the consumer that we've completed a tentative definition.
    if (!VD->isInvalidDecl())
      Consumer.CompleteTentativeDefinition(VD);
  }

  for (auto D : ExternalDeclarations) {
    if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
      continue;

    Consumer.CompleteExternalDeclaration(D);
  }

  // If there were errors, disable 'unused' warnings since they will mostly be
  // noise. Don't warn for a use from a module: either we should warn on all
  // file-scope declarations in modules or not at all, but whether the
  // declaration is used is immaterial.
  if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
    // Output warning for unused file scoped decls.
    for (UnusedFileScopedDeclsType::iterator
             I = UnusedFileScopedDecls.begin(ExternalSource),
             E = UnusedFileScopedDecls.end();
         I != E; ++I) {
      if (ShouldRemoveFromUnused(this, *I))
        continue;

      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
        const FunctionDecl *DiagD;
        if (!FD->hasBody(DiagD))
          DiagD = FD;
        if (DiagD->isDeleted())
          continue; // Deleted functions are supposed to be unused.
        if (DiagD->isReferenced()) {
          if (isa<CXXMethodDecl>(DiagD))
            Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
                << DiagD;
          else {
            if (FD->getStorageClass() == SC_Static &&
                !FD->isInlineSpecified() &&
                !SourceMgr.isInMainFile(
                    SourceMgr.getExpansionLoc(FD->getLocation())))
              Diag(DiagD->getLocation(),
                   diag::warn_unneeded_static_internal_decl)
                  << DiagD;
            else
              Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
                  << /*function*/ 0 << DiagD;
          }
        } else {
          if (FD->getDescribedFunctionTemplate())
            Diag(DiagD->getLocation(), diag::warn_unused_template)
                << /*function*/ 0 << DiagD;
          else
            Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
                                           ? diag::warn_unused_member_function
                                           : diag::warn_unused_function)
                << DiagD;
        }
      } else {
        const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
        if (!DiagD)
          DiagD = cast<VarDecl>(*I);
        if (DiagD->isReferenced()) {
          Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
              << /*variable*/ 1 << DiagD;
        } else if (DiagD->getType().isConstQualified()) {
          const SourceManager &SM = SourceMgr;
          if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
              !PP.getLangOpts().IsHeaderFile)
            Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
                << DiagD;
        } else {
          if (DiagD->getDescribedVarTemplate())
            Diag(DiagD->getLocation(), diag::warn_unused_template)
                << /*variable*/ 1 << DiagD;
          else
            Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD;
        }
      }
    }

    emitAndClearUnusedLocalTypedefWarnings();
  }

  if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
    // FIXME: Load additional unused private field candidates from the external
    // source.
    RecordCompleteMap RecordsComplete;
    RecordCompleteMap MNCComplete;
    for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
                                    E = UnusedPrivateFields.end();
         I != E; ++I) {
      const NamedDecl *D = *I;
      const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
      if (RD && !RD->isUnion() &&
          IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
        Diag(D->getLocation(), diag::warn_unused_private_field)
            << D->getDeclName();
      }
    }
  }

  if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
    if (ExternalSource)
      ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
    for (const auto &DeletedFieldInfo : DeleteExprs) {
      for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
        AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
                                  DeleteExprLoc.second);
      }
    }
  }

  // Check we've noticed that we're no longer parsing the initializer for every
  // variable. If we miss cases, then at best we have a performance issue and
  // at worst a rejects-valid bug.
  assert(ParsingInitForAutoVars.empty() &&
         "Didn't unmark var as having its initializer parsed");

  if (!PP.isIncrementalProcessingEnabled())
    TUScope = nullptr;
}

//===----------------------------------------------------------------------===//
// Helper functions.
//===----------------------------------------------------------------------===//

DeclContext *Sema::getFunctionLevelDeclContext() {
  DeclContext *DC = CurContext;

  while (true) {
    if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
        isa<RequiresExprBodyDecl>(DC)) {
      DC = DC->getParent();
    } else if (isa<CXXMethodDecl>(DC) &&
               cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
               cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
      DC = DC->getParent()->getParent();
    } else
      break;
  }

  return DC;
}

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *Sema::getCurFunctionDecl() {
  DeclContext *DC = getFunctionLevelDeclContext();
  return dyn_cast<FunctionDecl>(DC);
}

ObjCMethodDecl *Sema::getCurMethodDecl() {
  DeclContext *DC = getFunctionLevelDeclContext();
  while (isa<RecordDecl>(DC))
    DC = DC->getParent();
  return dyn_cast<ObjCMethodDecl>(DC);
}

NamedDecl *Sema::getCurFunctionOrMethodDecl() {
  DeclContext *DC = getFunctionLevelDeclContext();
  if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
    return cast<NamedDecl>(DC);
  return nullptr;
}

LangAS Sema::getDefaultCXXMethodAddrSpace() const {
  if (getLangOpts().OpenCL)
    return LangAS::opencl_generic;
  return LangAS::Default;
}

void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
  // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
  // and yet we also use the current diag ID on the DiagnosticsEngine. This has
  // been made more painfully obvious by the refactor that introduced this
  // function, but it is possible that the incoming argument can be
  // eliminated.
  // If it truly cannot be (for example, there is some reentrancy
  // issue I am not seeing yet), then there should at least be a clarifying
  // comment somewhere.
  if (Optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
    switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
        Diags.getCurrentDiagID())) {
    case DiagnosticIDs::SFINAE_Report:
      // We'll report the diagnostic below.
      break;

    case DiagnosticIDs::SFINAE_SubstitutionFailure:
      // Count this failure so that we know that template argument deduction
      // has failed.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        Diagnostic DiagInfo(&Diags);
        (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
                       PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      Diags.setLastDiagnosticIgnored(true);
      Diags.Clear();
      return;

    case DiagnosticIDs::SFINAE_AccessControl: {
      // Per C++ Core Issue 1170, access control is part of SFINAE.
      // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
      // make access control a part of SFINAE for the purposes of checking
      // type traits.
      if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
        break;

      SourceLocation Loc = Diags.getCurrentDiagLoc();

      // Suppress this diagnostic.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        Diagnostic DiagInfo(&Diags);
        (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
                       PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      Diags.setLastDiagnosticIgnored(true);
      Diags.Clear();

      // Now that the diagnostic state is clear, produce a C++98 compatibility
      // warning.
      Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);

      // The last diagnostic which Sema produced was ignored. Suppress any
      // notes attached to it.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }

    case DiagnosticIDs::SFINAE_Suppress:
      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info) {
        Diagnostic DiagInfo(&Diags);
        (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
                       PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      // Suppress this diagnostic.
      Diags.setLastDiagnosticIgnored(true);
      Diags.Clear();
      return;
    }
  }

  // Copy the diagnostic printing policy over the ASTContext printing policy.
  // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
  Context.setPrintingPolicy(getPrintingPolicy());

  // Emit the diagnostic.
  if (!Diags.EmitCurrentDiagnostic())
    return;

  // If this is not a note, and we're in a template instantiation
  // that is different from the last template instantiation where
  // we emitted an error, print a template instantiation
  // backtrace.
1462 if (!DiagnosticIDs::isBuiltinNote(DiagID))
1463 PrintContextStack();
1464 }
1465
1466 Sema::SemaDiagnosticBuilder
1467 Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
1468 return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
1469 }
1470
1471 bool Sema::hasUncompilableErrorOccurred() const {
1472 if (getDiagnostics().hasUncompilableErrorOccurred())
1473 return true;
1474 auto *FD = dyn_cast<FunctionDecl>(CurContext);
1475 if (!FD)
1476 return false;
1477 auto Loc = DeviceDeferredDiags.find(FD);
1478 if (Loc == DeviceDeferredDiags.end())
1479 return false;
1480 for (auto PDAt : Loc->second) {
1481 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
1482 return true;
1483 }
1484 return false;
1485 }
1486
1487 // Print notes showing how we can reach FD starting from an a priori
1488 // known-callable function.
1489 static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
1490 auto FnIt = S.DeviceKnownEmittedFns.find(FD);
1491 while (FnIt != S.DeviceKnownEmittedFns.end()) {
1492 // Respect error limit.
1493 if (S.Diags.hasFatalErrorOccurred())
1494 return;
1495 DiagnosticBuilder Builder(
1496 S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1497 Builder << FnIt->second.FD;
1498 FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
1499 }
1500 }
1501
1502 namespace {
1503
1504 /// Helper class that emits a function's deferred diagnostic messages once an
1505 /// entity that directly or indirectly uses that function is known to be
1506 /// emitted.
1507 ///
1508 /// During parsing of the AST, certain diagnostic messages are recorded as
1509 /// deferred diagnostics because it is not yet known whether the functions
1510 /// containing them will be emitted. A list of potentially emitted functions,
1511 /// and of variables whose initializers may trigger the emission of functions,
1512 /// is also recorded. DeferredDiagnosticsEmitter recursively visits the
1513 /// functions used by each such function in order to emit deferred diagnostics.
1514 ///
1515 /// During the visit, certain OpenMP directives, or the initializers of
1516 /// variables with certain OpenMP attributes, cause the subsequent visits of
1517 /// functions to enter a state called the OpenMP device context in this
1518 /// implementation. The state is exited when the directive or initializer is
1519 /// exited. This state can change the emission states of subsequent uses
1520 /// of functions.
1521 ///
1522 /// Conceptually the functions or variables to be visited form a use graph
1523 /// where the parent node uses the child node. At any point during the visit,
1524 /// the nodes traversed from the root to the current node form a use
1525 /// stack. The emission state of the current node depends on two factors:
1526 /// 1. the emission state of the root node
1527 /// 2. whether the current node is in OpenMP device context
1528 /// If the function is determined to be emitted, its contained deferred
1529 /// diagnostics are emitted, together with information about the use stack.
1530 ///
1531 class DeferredDiagnosticsEmitter
1532 : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1533 public:
1534 typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1535
1536 // Whether the function is already in the current use-path.
1537 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1538
1539 // The current use-path.
1540 llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1541
1542 // Whether the visiting of the function has been done.
Done[0] is for the 1543 // case not in OpenMP device context. Done[1] is for the case in OpenMP 1544 // device context. We need two sets because diagnostics emission may be 1545 // different depending on whether it is in OpenMP device context. 1546 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2]; 1547 1548 // Emission state of the root node of the current use graph. 1549 bool ShouldEmitRootNode; 1550 1551 // Current OpenMP device context level. It is initialized to 0 and each 1552 // entering of device context increases it by 1 and each exit decreases 1553 // it by 1. Non-zero value indicates it is currently in device context. 1554 unsigned InOMPDeviceContext; 1555 1556 DeferredDiagnosticsEmitter(Sema &S) 1557 : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {} 1558 1559 void VisitOMPTargetDirective(OMPTargetDirective *Node) { 1560 ++InOMPDeviceContext; 1561 Inherited::VisitOMPTargetDirective(Node); 1562 --InOMPDeviceContext; 1563 } 1564 1565 void visitUsedDecl(SourceLocation Loc, Decl *D) { 1566 if (isa<VarDecl>(D)) 1567 return; 1568 if (auto *FD = dyn_cast<FunctionDecl>(D)) 1569 checkFunc(Loc, FD); 1570 else 1571 Inherited::visitUsedDecl(Loc, D); 1572 } 1573 1574 void checkVar(VarDecl *VD) { 1575 assert(VD->isFileVarDecl() && 1576 "Should only check file-scope variables"); 1577 if (auto *Init = VD->getInit()) { 1578 auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD); 1579 bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost || 1580 *DevTy == OMPDeclareTargetDeclAttr::DT_Any); 1581 if (IsDev) 1582 ++InOMPDeviceContext; 1583 this->Visit(Init); 1584 if (IsDev) 1585 --InOMPDeviceContext; 1586 } 1587 } 1588 1589 void checkFunc(SourceLocation Loc, FunctionDecl *FD) { 1590 auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0]; 1591 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back(); 1592 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) || 1593 S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD)) 1594 return; 1595 // Finalize analysis of OpenMP-specific constructs. 1596 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 && 1597 (ShouldEmitRootNode || InOMPDeviceContext)) 1598 S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc); 1599 if (Caller) 1600 S.DeviceKnownEmittedFns[FD] = {Caller, Loc}; 1601 // Always emit deferred diagnostics for the direct users. This does not 1602 // lead to explosion of diagnostics since each user is visited at most 1603 // twice. 1604 if (ShouldEmitRootNode || InOMPDeviceContext) 1605 emitDeferredDiags(FD, Caller); 1606 // Do not revisit a function if the function body has been completely 1607 // visited before. 1608 if (!Done.insert(FD).second) 1609 return; 1610 InUsePath.insert(FD); 1611 UsePath.push_back(FD); 1612 if (auto *S = FD->getBody()) { 1613 this->Visit(S); 1614 } 1615 UsePath.pop_back(); 1616 InUsePath.erase(FD); 1617 } 1618 1619 void checkRecordedDecl(Decl *D) { 1620 if (auto *FD = dyn_cast<FunctionDecl>(D)) { 1621 ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) == 1622 Sema::FunctionEmissionStatus::Emitted; 1623 checkFunc(SourceLocation(), FD); 1624 } else 1625 checkVar(cast<VarDecl>(D)); 1626 } 1627 1628 // Emit any deferred diagnostics for FD 1629 void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) { 1630 auto It = S.DeviceDeferredDiags.find(FD); 1631 if (It == S.DeviceDeferredDiags.end()) 1632 return; 1633 bool HasWarningOrError = false; 1634 bool FirstDiag = true; 1635 for (PartialDiagnosticAt &PDAt : It->second) { 1636 // Respect error limit. 
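      // (A fatal error, for example from exceeding -ferror-limit, makes
      // hasFatalErrorOccurred() return true; no further deferred diagnostics
      // are emitted once that happens.)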
1637 if (S.Diags.hasFatalErrorOccurred()) 1638 return; 1639 const SourceLocation &Loc = PDAt.first; 1640 const PartialDiagnostic &PD = PDAt.second; 1641 HasWarningOrError |= 1642 S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >= 1643 DiagnosticsEngine::Warning; 1644 { 1645 DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID())); 1646 PD.Emit(Builder); 1647 } 1648 // Emit the note on the first diagnostic in case too many diagnostics 1649 // cause the note not emitted. 1650 if (FirstDiag && HasWarningOrError && ShowCallStack) { 1651 emitCallStackNotes(S, FD); 1652 FirstDiag = false; 1653 } 1654 } 1655 } 1656 }; 1657 } // namespace 1658 1659 void Sema::emitDeferredDiags() { 1660 if (ExternalSource) 1661 ExternalSource->ReadDeclsToCheckForDeferredDiags( 1662 DeclsToCheckForDeferredDiags); 1663 1664 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) || 1665 DeclsToCheckForDeferredDiags.empty()) 1666 return; 1667 1668 DeferredDiagnosticsEmitter DDE(*this); 1669 for (auto D : DeclsToCheckForDeferredDiags) 1670 DDE.checkRecordedDecl(D); 1671 } 1672 1673 // In CUDA, there are some constructs which may appear in semantically-valid 1674 // code, but trigger errors if we ever generate code for the function in which 1675 // they appear. Essentially every construct you're not allowed to use on the 1676 // device falls into this category, because you are allowed to use these 1677 // constructs in a __host__ __device__ function, but only if that function is 1678 // never codegen'ed on the device. 1679 // 1680 // To handle semantic checking for these constructs, we keep track of the set of 1681 // functions we know will be emitted, either because we could tell a priori that 1682 // they would be emitted, or because they were transitively called by a 1683 // known-emitted function. 1684 // 1685 // We also keep a partial call graph of which not-known-emitted functions call 1686 // which other not-known-emitted functions. 1687 // 1688 // When we see something which is illegal if the current function is emitted 1689 // (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or 1690 // CheckCUDACall), we first check if the current function is known-emitted. If 1691 // so, we immediately output the diagnostic. 1692 // 1693 // Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags 1694 // until we discover that the function is known-emitted, at which point we take 1695 // it out of this map and emit the diagnostic. 1696 1697 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc, 1698 unsigned DiagID, 1699 FunctionDecl *Fn, Sema &S) 1700 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn), 1701 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) { 1702 switch (K) { 1703 case K_Nop: 1704 break; 1705 case K_Immediate: 1706 case K_ImmediateWithCallStack: 1707 ImmediateDiag.emplace( 1708 ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID)); 1709 break; 1710 case K_Deferred: 1711 assert(Fn && "Must have a function to attach the deferred diag to."); 1712 auto &Diags = S.DeviceDeferredDiags[Fn]; 1713 PartialDiagId.emplace(Diags.size()); 1714 Diags.emplace_back(Loc, S.PDiag(DiagID)); 1715 break; 1716 } 1717 } 1718 1719 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D) 1720 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn), 1721 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag), 1722 PartialDiagId(D.PartialDiagId) { 1723 // Clean the previous diagnostics. 
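  // Reset the moved-from builder so that its destructor does not emit the
  // same diagnostic a second time.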
1724 D.ShowCallStack = false; 1725 D.ImmediateDiag.reset(); 1726 D.PartialDiagId.reset(); 1727 } 1728 1729 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() { 1730 if (ImmediateDiag) { 1731 // Emit our diagnostic and, if it was a warning or error, output a callstack 1732 // if Fn isn't a priori known-emitted. 1733 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel( 1734 DiagID, Loc) >= DiagnosticsEngine::Warning; 1735 ImmediateDiag.reset(); // Emit the immediate diag. 1736 if (IsWarningOrError && ShowCallStack) 1737 emitCallStackNotes(S, Fn); 1738 } else { 1739 assert((!PartialDiagId || ShowCallStack) && 1740 "Must always show call stack for deferred diags."); 1741 } 1742 } 1743 1744 Sema::SemaDiagnosticBuilder 1745 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) { 1746 FD = FD ? FD : getCurFunctionDecl(); 1747 if (LangOpts.OpenMP) 1748 return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD) 1749 : diagIfOpenMPHostCode(Loc, DiagID, FD); 1750 if (getLangOpts().CUDA) 1751 return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID) 1752 : CUDADiagIfHostCode(Loc, DiagID); 1753 1754 if (getLangOpts().SYCLIsDevice) 1755 return SYCLDiagIfDeviceCode(Loc, DiagID); 1756 1757 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID, 1758 FD, *this); 1759 } 1760 1761 Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID, 1762 bool DeferHint) { 1763 bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID); 1764 bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag && 1765 DiagnosticIDs::isDeferrable(DiagID) && 1766 (DeferHint || !IsError); 1767 auto SetIsLastErrorImmediate = [&](bool Flag) { 1768 if (IsError) 1769 IsLastErrorImmediate = Flag; 1770 }; 1771 if (!ShouldDefer) { 1772 SetIsLastErrorImmediate(true); 1773 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, 1774 DiagID, getCurFunctionDecl(), *this); 1775 } 1776 1777 SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice 1778 ? CUDADiagIfDeviceCode(Loc, DiagID) 1779 : CUDADiagIfHostCode(Loc, DiagID); 1780 SetIsLastErrorImmediate(DB.isImmediate()); 1781 return DB; 1782 } 1783 1784 void Sema::checkDeviceDecl(ValueDecl *D, SourceLocation Loc) { 1785 if (isUnevaluatedContext()) 1786 return; 1787 1788 Decl *C = cast<Decl>(getCurLexicalContext()); 1789 1790 // Memcpy operations for structs containing a member with unsupported type 1791 // are ok, though. 1792 if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) { 1793 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) && 1794 MD->isTrivial()) 1795 return; 1796 1797 if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD)) 1798 if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial()) 1799 return; 1800 } 1801 1802 // Try to associate errors with the lexical context, if that is a function, or 1803 // the value declaration otherwise. 1804 FunctionDecl *FD = 1805 isa<FunctionDecl>(C) ? 
cast<FunctionDecl>(C) : dyn_cast<FunctionDecl>(D);
1806 auto CheckType = [&](QualType Ty) {
1807 if (Ty->isDependentType())
1808 return;
1809
1810 if (Ty->isExtIntType()) {
1811 if (!Context.getTargetInfo().hasExtIntType()) {
1812 targetDiag(Loc, diag::err_device_unsupported_type, FD)
1813 << D << false /*show bit size*/ << 0 /*bitsize*/
1814 << Ty << Context.getTargetInfo().getTriple().str();
1815 }
1816 return;
1817 }
1818
1819 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
1820 ((Ty->isFloat128Type() ||
1821 (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
1822 !Context.getTargetInfo().hasFloat128Type()) ||
1823 (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
1824 !Context.getTargetInfo().hasInt128Type())) {
1825 if (targetDiag(Loc, diag::err_device_unsupported_type, FD)
1826 << D << true /*show bit size*/
1827 << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
1828 << Context.getTargetInfo().getTriple().str())
1829 D->setInvalidDecl();
1830 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
1831 }
1832 };
1833
1834 QualType Ty = D->getType();
1835 CheckType(Ty);
1836
1837 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
1838 for (const auto &ParamTy : FPTy->param_types())
1839 CheckType(ParamTy);
1840 CheckType(FPTy->getReturnType());
1841 }
1842 if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
1843 CheckType(FNPTy->getReturnType());
1844 }
1845
1846 /// Looks through the macro-expansion chain for the given
1847 /// location, looking for a macro expansion with the given name.
1848 /// If one is found, returns true and sets the location to that
1849 /// expansion loc.
1850 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
1851 SourceLocation loc = locref;
1852 if (!loc.isMacroID()) return false;
1853
1854 // There's no good way right now to look at the intermediate
1855 // expansions, so just jump to the expansion location.
1856 loc = getSourceManager().getExpansionLoc(loc);
1857
1858 // If that's written with the name, stop here.
1859 SmallString<16> buffer;
1860 if (getPreprocessor().getSpelling(loc, buffer) == name) {
1861 locref = loc;
1862 return true;
1863 }
1864 return false;
1865 }
1866
1867 /// Determines the active Scope associated with the given declaration
1868 /// context.
1869 ///
1870 /// This routine maps a declaration context to the active Scope object that
1871 /// represents that declaration context in the parser. It is typically used
1872 /// from "scope-less" code (e.g., template instantiation, lazy creation of
1873 /// declarations) that injects a name for name-lookup purposes and, therefore,
1874 /// must update the Scope.
1875 ///
1876 /// \returns The scope corresponding to the given declaration context, or NULL
1877 /// if no such scope is open.
1878 Scope *Sema::getScopeForContext(DeclContext *Ctx) {
1879
1880 if (!Ctx)
1881 return nullptr;
1882
1883 Ctx = Ctx->getPrimaryContext();
1884 for (Scope *S = getCurScope(); S; S = S->getParent()) {
1885 // Ignore scopes that cannot have declarations. This is important for
1886 // out-of-line definitions of static class members.
1887 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) 1888 if (DeclContext *Entity = S->getEntity()) 1889 if (Ctx == Entity->getPrimaryContext()) 1890 return S; 1891 } 1892 1893 return nullptr; 1894 } 1895 1896 /// Enter a new function scope 1897 void Sema::PushFunctionScope() { 1898 if (FunctionScopes.empty() && CachedFunctionScope) { 1899 // Use CachedFunctionScope to avoid allocating memory when possible. 1900 CachedFunctionScope->Clear(); 1901 FunctionScopes.push_back(CachedFunctionScope.release()); 1902 } else { 1903 FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics())); 1904 } 1905 if (LangOpts.OpenMP) 1906 pushOpenMPFunctionRegion(); 1907 } 1908 1909 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) { 1910 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(), 1911 BlockScope, Block)); 1912 } 1913 1914 LambdaScopeInfo *Sema::PushLambdaScope() { 1915 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics()); 1916 FunctionScopes.push_back(LSI); 1917 return LSI; 1918 } 1919 1920 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) { 1921 if (LambdaScopeInfo *const LSI = getCurLambda()) { 1922 LSI->AutoTemplateParameterDepth = Depth; 1923 return; 1924 } 1925 llvm_unreachable( 1926 "Remove assertion if intentionally called in a non-lambda context."); 1927 } 1928 1929 // Check that the type of the VarDecl has an accessible copy constructor and 1930 // resolve its destructor's exception specification. 1931 static void checkEscapingByref(VarDecl *VD, Sema &S) { 1932 QualType T = VD->getType(); 1933 EnterExpressionEvaluationContext scope( 1934 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated); 1935 SourceLocation Loc = VD->getLocation(); 1936 Expr *VarRef = 1937 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc); 1938 ExprResult Result = S.PerformMoveOrCopyInitialization( 1939 InitializedEntity::InitializeBlock(Loc, T, false), VD, VD->getType(), 1940 VarRef, /*AllowNRVO=*/true); 1941 if (!Result.isInvalid()) { 1942 Result = S.MaybeCreateExprWithCleanups(Result); 1943 Expr *Init = Result.getAs<Expr>(); 1944 S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init)); 1945 } 1946 1947 // The destructor's exception specification is needed when IRGen generates 1948 // block copy/destroy functions. Resolve it here. 1949 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) 1950 if (CXXDestructorDecl *DD = RD->getDestructor()) { 1951 auto *FPT = DD->getType()->getAs<FunctionProtoType>(); 1952 S.ResolveExceptionSpec(Loc, FPT); 1953 } 1954 } 1955 1956 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) { 1957 // Set the EscapingByref flag of __block variables captured by 1958 // escaping blocks. 1959 for (const BlockDecl *BD : FSI.Blocks) { 1960 for (const BlockDecl::Capture &BC : BD->captures()) { 1961 VarDecl *VD = BC.getVariable(); 1962 if (VD->hasAttr<BlocksAttr>()) { 1963 // Nothing to do if this is a __block variable captured by a 1964 // non-escaping block. 1965 if (BD->doesNotEscape()) 1966 continue; 1967 VD->setEscapingByref(); 1968 } 1969 // Check whether the captured variable is or contains an object of 1970 // non-trivial C union type. 
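      // (Under ARC, for example, a union with a __strong Objective-C pointer
      // member is non-trivial to copy and to destroy.)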
1971 QualType CapType = BC.getVariable()->getType(); 1972 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() || 1973 CapType.hasNonTrivialToPrimitiveCopyCUnion()) 1974 S.checkNonTrivialCUnion(BC.getVariable()->getType(), 1975 BD->getCaretLocation(), 1976 Sema::NTCUC_BlockCapture, 1977 Sema::NTCUK_Destruct|Sema::NTCUK_Copy); 1978 } 1979 } 1980 1981 for (VarDecl *VD : FSI.ByrefBlockVars) { 1982 // __block variables might require us to capture a copy-initializer. 1983 if (!VD->isEscapingByref()) 1984 continue; 1985 // It's currently invalid to ever have a __block variable with an 1986 // array type; should we diagnose that here? 1987 // Regardless, we don't want to ignore array nesting when 1988 // constructing this copy. 1989 if (VD->getType()->isStructureOrClassType()) 1990 checkEscapingByref(VD, S); 1991 } 1992 } 1993 1994 /// Pop a function (or block or lambda or captured region) scope from the stack. 1995 /// 1996 /// \param WP The warning policy to use for CFG-based warnings, or null if such 1997 /// warnings should not be produced. 1998 /// \param D The declaration corresponding to this function scope, if producing 1999 /// CFG-based warnings. 2000 /// \param BlockType The type of the block expression, if D is a BlockDecl. 2001 Sema::PoppedFunctionScopePtr 2002 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, 2003 const Decl *D, QualType BlockType) { 2004 assert(!FunctionScopes.empty() && "mismatched push/pop!"); 2005 2006 markEscapingByrefs(*FunctionScopes.back(), *this); 2007 2008 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(), 2009 PoppedFunctionScopeDeleter(this)); 2010 2011 if (LangOpts.OpenMP) 2012 popOpenMPFunctionRegion(Scope.get()); 2013 2014 // Issue any analysis-based warnings. 2015 if (WP && D) 2016 AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType); 2017 else 2018 for (const auto &PUD : Scope->PossiblyUnreachableDiags) 2019 Diag(PUD.Loc, PUD.PD); 2020 2021 return Scope; 2022 } 2023 2024 void Sema::PoppedFunctionScopeDeleter:: 2025 operator()(sema::FunctionScopeInfo *Scope) const { 2026 // Stash the function scope for later reuse if it's for a normal function. 2027 if (Scope->isPlainFunction() && !Self->CachedFunctionScope) 2028 Self->CachedFunctionScope.reset(Scope); 2029 else 2030 delete Scope; 2031 } 2032 2033 void Sema::PushCompoundScope(bool IsStmtExpr) { 2034 getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr)); 2035 } 2036 2037 void Sema::PopCompoundScope() { 2038 FunctionScopeInfo *CurFunction = getCurFunction(); 2039 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop"); 2040 2041 CurFunction->CompoundScopes.pop_back(); 2042 } 2043 2044 /// Determine whether any errors occurred within this function/method/ 2045 /// block. 
2046 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const { 2047 return getCurFunction()->hasUnrecoverableErrorOccurred(); 2048 } 2049 2050 void Sema::setFunctionHasBranchIntoScope() { 2051 if (!FunctionScopes.empty()) 2052 FunctionScopes.back()->setHasBranchIntoScope(); 2053 } 2054 2055 void Sema::setFunctionHasBranchProtectedScope() { 2056 if (!FunctionScopes.empty()) 2057 FunctionScopes.back()->setHasBranchProtectedScope(); 2058 } 2059 2060 void Sema::setFunctionHasIndirectGoto() { 2061 if (!FunctionScopes.empty()) 2062 FunctionScopes.back()->setHasIndirectGoto(); 2063 } 2064 2065 BlockScopeInfo *Sema::getCurBlock() { 2066 if (FunctionScopes.empty()) 2067 return nullptr; 2068 2069 auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back()); 2070 if (CurBSI && CurBSI->TheDecl && 2071 !CurBSI->TheDecl->Encloses(CurContext)) { 2072 // We have switched contexts due to template instantiation. 2073 assert(!CodeSynthesisContexts.empty()); 2074 return nullptr; 2075 } 2076 2077 return CurBSI; 2078 } 2079 2080 FunctionScopeInfo *Sema::getEnclosingFunction() const { 2081 if (FunctionScopes.empty()) 2082 return nullptr; 2083 2084 for (int e = FunctionScopes.size() - 1; e >= 0; --e) { 2085 if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) 2086 continue; 2087 return FunctionScopes[e]; 2088 } 2089 return nullptr; 2090 } 2091 2092 LambdaScopeInfo *Sema::getEnclosingLambda() const { 2093 for (auto *Scope : llvm::reverse(FunctionScopes)) { 2094 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) { 2095 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) { 2096 // We have switched contexts due to template instantiation. 2097 // FIXME: We should swap out the FunctionScopes during code synthesis 2098 // so that we don't need to check for this. 2099 assert(!CodeSynthesisContexts.empty()); 2100 return nullptr; 2101 } 2102 return LSI; 2103 } 2104 } 2105 return nullptr; 2106 } 2107 2108 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) { 2109 if (FunctionScopes.empty()) 2110 return nullptr; 2111 2112 auto I = FunctionScopes.rbegin(); 2113 if (IgnoreNonLambdaCapturingScope) { 2114 auto E = FunctionScopes.rend(); 2115 while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I)) 2116 ++I; 2117 if (I == E) 2118 return nullptr; 2119 } 2120 auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I); 2121 if (CurLSI && CurLSI->Lambda && 2122 !CurLSI->Lambda->Encloses(CurContext)) { 2123 // We have switched contexts due to template instantiation. 2124 assert(!CodeSynthesisContexts.empty()); 2125 return nullptr; 2126 } 2127 2128 return CurLSI; 2129 } 2130 2131 // We have a generic lambda if we parsed auto parameters, or we have 2132 // an associated template parameter list. 2133 LambdaScopeInfo *Sema::getCurGenericLambda() { 2134 if (LambdaScopeInfo *LSI = getCurLambda()) { 2135 return (LSI->TemplateParams.size() || 2136 LSI->GLTemplateParameterList) ? 
LSI : nullptr; 2137 } 2138 return nullptr; 2139 } 2140 2141 2142 void Sema::ActOnComment(SourceRange Comment) { 2143 if (!LangOpts.RetainCommentsFromSystemHeaders && 2144 SourceMgr.isInSystemHeader(Comment.getBegin())) 2145 return; 2146 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false); 2147 if (RC.isAlmostTrailingComment()) { 2148 SourceRange MagicMarkerRange(Comment.getBegin(), 2149 Comment.getBegin().getLocWithOffset(3)); 2150 StringRef MagicMarkerText; 2151 switch (RC.getKind()) { 2152 case RawComment::RCK_OrdinaryBCPL: 2153 MagicMarkerText = "///<"; 2154 break; 2155 case RawComment::RCK_OrdinaryC: 2156 MagicMarkerText = "/**<"; 2157 break; 2158 default: 2159 llvm_unreachable("if this is an almost Doxygen comment, " 2160 "it should be ordinary"); 2161 } 2162 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) << 2163 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText); 2164 } 2165 Context.addComment(RC); 2166 } 2167 2168 // Pin this vtable to this file. 2169 ExternalSemaSource::~ExternalSemaSource() {} 2170 char ExternalSemaSource::ID; 2171 2172 void ExternalSemaSource::ReadMethodPool(Selector Sel) { } 2173 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { } 2174 2175 void ExternalSemaSource::ReadKnownNamespaces( 2176 SmallVectorImpl<NamespaceDecl *> &Namespaces) { 2177 } 2178 2179 void ExternalSemaSource::ReadUndefinedButUsed( 2180 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {} 2181 2182 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector< 2183 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {} 2184 2185 /// Figure out if an expression could be turned into a call. 2186 /// 2187 /// Use this when trying to recover from an error where the programmer may have 2188 /// written just the name of a function instead of actually calling it. 2189 /// 2190 /// \param E - The expression to examine. 2191 /// \param ZeroArgCallReturnTy - If the expression can be turned into a call 2192 /// with no arguments, this parameter is set to the type returned by such a 2193 /// call; otherwise, it is set to an empty QualType. 2194 /// \param OverloadSet - If the expression is an overloaded function 2195 /// name, this parameter is populated with the decls of the various overloads. 2196 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, 2197 UnresolvedSetImpl &OverloadSet) { 2198 ZeroArgCallReturnTy = QualType(); 2199 OverloadSet.clear(); 2200 2201 const OverloadExpr *Overloads = nullptr; 2202 bool IsMemExpr = false; 2203 if (E.getType() == Context.OverloadTy) { 2204 OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E)); 2205 2206 // Ignore overloads that are pointer-to-member constants. 2207 if (FR.HasFormOfMemberPointer) 2208 return false; 2209 2210 Overloads = FR.Expression; 2211 } else if (E.getType() == Context.BoundMemberTy) { 2212 Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens()); 2213 IsMemExpr = true; 2214 } 2215 2216 bool Ambiguous = false; 2217 bool IsMV = false; 2218 2219 if (Overloads) { 2220 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), 2221 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { 2222 OverloadSet.addDecl(*it); 2223 2224 // Check whether the function is a non-template, non-member which takes no 2225 // arguments. 
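      // (CPU-dispatch / CPU-specific multiversion functions appear as several
      // declarations sharing one return type; the IsMV handling below avoids
      // treating them as ambiguous.)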
2226 if (IsMemExpr) 2227 continue; 2228 if (const FunctionDecl *OverloadDecl 2229 = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { 2230 if (OverloadDecl->getMinRequiredArguments() == 0) { 2231 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous && 2232 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() || 2233 OverloadDecl->isCPUSpecificMultiVersion()))) { 2234 ZeroArgCallReturnTy = QualType(); 2235 Ambiguous = true; 2236 } else { 2237 ZeroArgCallReturnTy = OverloadDecl->getReturnType(); 2238 IsMV = OverloadDecl->isCPUDispatchMultiVersion() || 2239 OverloadDecl->isCPUSpecificMultiVersion(); 2240 } 2241 } 2242 } 2243 } 2244 2245 // If it's not a member, use better machinery to try to resolve the call 2246 if (!IsMemExpr) 2247 return !ZeroArgCallReturnTy.isNull(); 2248 } 2249 2250 // Attempt to call the member with no arguments - this will correctly handle 2251 // member templates with defaults/deduction of template arguments, overloads 2252 // with default arguments, etc. 2253 if (IsMemExpr && !E.isTypeDependent()) { 2254 Sema::TentativeAnalysisScope Trap(*this); 2255 ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), 2256 None, SourceLocation()); 2257 if (R.isUsable()) { 2258 ZeroArgCallReturnTy = R.get()->getType(); 2259 return true; 2260 } 2261 return false; 2262 } 2263 2264 if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { 2265 if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { 2266 if (Fun->getMinRequiredArguments() == 0) 2267 ZeroArgCallReturnTy = Fun->getReturnType(); 2268 return true; 2269 } 2270 } 2271 2272 // We don't have an expression that's convenient to get a FunctionDecl from, 2273 // but we can at least check if the type is "function of 0 arguments". 2274 QualType ExprTy = E.getType(); 2275 const FunctionType *FunTy = nullptr; 2276 QualType PointeeTy = ExprTy->getPointeeType(); 2277 if (!PointeeTy.isNull()) 2278 FunTy = PointeeTy->getAs<FunctionType>(); 2279 if (!FunTy) 2280 FunTy = ExprTy->getAs<FunctionType>(); 2281 2282 if (const FunctionProtoType *FPT = 2283 dyn_cast_or_null<FunctionProtoType>(FunTy)) { 2284 if (FPT->getNumParams() == 0) 2285 ZeroArgCallReturnTy = FunTy->getReturnType(); 2286 return true; 2287 } 2288 return false; 2289 } 2290 2291 /// Give notes for a set of overloads. 2292 /// 2293 /// A companion to tryExprAsCall. In cases when the name that the programmer 2294 /// wrote was an overloaded function, we may be able to make some guesses about 2295 /// plausible overloads based on their return types; such guesses can be handed 2296 /// off to this method to be emitted as notes. 2297 /// 2298 /// \param Overloads - The overloads to note. 2299 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to 2300 /// -fshow-overloads=best, this is the location to attach to the note about too 2301 /// many candidates. Typically this will be the location of the original 2302 /// ill-formed expression. 2303 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads, 2304 const SourceLocation FinalNoteLoc) { 2305 int ShownOverloads = 0; 2306 int SuppressedOverloads = 0; 2307 for (UnresolvedSetImpl::iterator It = Overloads.begin(), 2308 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2309 // FIXME: Magic number for max shown overloads stolen from 2310 // OverloadCandidateSet::NoteCandidates. 
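    // With -fshow-overloads=best we list at most four candidates and
    // summarize the remainder in a single note after the loop.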
2311 if (ShownOverloads >= 4 && S.Diags.getShowOverloads() == Ovl_Best) { 2312 ++SuppressedOverloads; 2313 continue; 2314 } 2315 2316 NamedDecl *Fn = (*It)->getUnderlyingDecl(); 2317 // Don't print overloads for non-default multiversioned functions. 2318 if (const auto *FD = Fn->getAsFunction()) { 2319 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() && 2320 !FD->getAttr<TargetAttr>()->isDefaultVersion()) 2321 continue; 2322 } 2323 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call); 2324 ++ShownOverloads; 2325 } 2326 2327 if (SuppressedOverloads) 2328 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates) 2329 << SuppressedOverloads; 2330 } 2331 2332 static void notePlausibleOverloads(Sema &S, SourceLocation Loc, 2333 const UnresolvedSetImpl &Overloads, 2334 bool (*IsPlausibleResult)(QualType)) { 2335 if (!IsPlausibleResult) 2336 return noteOverloads(S, Overloads, Loc); 2337 2338 UnresolvedSet<2> PlausibleOverloads; 2339 for (OverloadExpr::decls_iterator It = Overloads.begin(), 2340 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2341 const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It); 2342 QualType OverloadResultTy = OverloadDecl->getReturnType(); 2343 if (IsPlausibleResult(OverloadResultTy)) 2344 PlausibleOverloads.addDecl(It.getDecl()); 2345 } 2346 noteOverloads(S, PlausibleOverloads, Loc); 2347 } 2348 2349 /// Determine whether the given expression can be called by just 2350 /// putting parentheses after it. Notably, expressions with unary 2351 /// operators can't be because the unary operator will start parsing 2352 /// outside the call. 2353 static bool IsCallableWithAppend(Expr *E) { 2354 E = E->IgnoreImplicit(); 2355 return (!isa<CStyleCastExpr>(E) && 2356 !isa<UnaryOperator>(E) && 2357 !isa<BinaryOperator>(E) && 2358 !isa<CXXOperatorCallExpr>(E)); 2359 } 2360 2361 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) { 2362 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 2363 E = UO->getSubExpr(); 2364 2365 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { 2366 if (ULE->getNumDecls() == 0) 2367 return false; 2368 2369 const NamedDecl *ND = *ULE->decls_begin(); 2370 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2371 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion(); 2372 } 2373 return false; 2374 } 2375 2376 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, 2377 bool ForceComplain, 2378 bool (*IsPlausibleResult)(QualType)) { 2379 SourceLocation Loc = E.get()->getExprLoc(); 2380 SourceRange Range = E.get()->getSourceRange(); 2381 2382 QualType ZeroArgCallTy; 2383 UnresolvedSet<4> Overloads; 2384 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) && 2385 !ZeroArgCallTy.isNull() && 2386 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) { 2387 // At this point, we know E is potentially callable with 0 2388 // arguments and that it returns something of a reasonable type, 2389 // so we can emit a fixit and carry on pretending that E was 2390 // actually a CallExpr. 2391 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd()); 2392 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2393 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range 2394 << (IsCallableWithAppend(E.get()) 2395 ? FixItHint::CreateInsertion(ParenInsertionLoc, "()") 2396 : FixItHint()); 2397 if (!IsMV) 2398 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2399 2400 // FIXME: Try this before emitting the fixit, and suppress diagnostics 2401 // while doing so. 
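    // Recover by actually building the zero-argument call so that callers of
    // tryToRecoverWithCall see an expression of the recovered result type.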
2402 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None, 2403 Range.getEnd().getLocWithOffset(1)); 2404 return true; 2405 } 2406 2407 if (!ForceComplain) return false; 2408 2409 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2410 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range; 2411 if (!IsMV) 2412 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2413 E = ExprError(); 2414 return true; 2415 } 2416 2417 IdentifierInfo *Sema::getSuperIdentifier() const { 2418 if (!Ident_super) 2419 Ident_super = &Context.Idents.get("super"); 2420 return Ident_super; 2421 } 2422 2423 IdentifierInfo *Sema::getFloat128Identifier() const { 2424 if (!Ident___float128) 2425 Ident___float128 = &Context.Idents.get("__float128"); 2426 return Ident___float128; 2427 } 2428 2429 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, 2430 CapturedRegionKind K, 2431 unsigned OpenMPCaptureLevel) { 2432 auto *CSI = new CapturedRegionScopeInfo( 2433 getDiagnostics(), S, CD, RD, CD->getContextParam(), K, 2434 (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0, 2435 OpenMPCaptureLevel); 2436 CSI->ReturnType = Context.VoidTy; 2437 FunctionScopes.push_back(CSI); 2438 } 2439 2440 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { 2441 if (FunctionScopes.empty()) 2442 return nullptr; 2443 2444 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); 2445 } 2446 2447 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & 2448 Sema::getMismatchingDeleteExpressions() const { 2449 return DeleteExprs; 2450 } 2451 2452 void Sema::setOpenCLExtensionForType(QualType T, llvm::StringRef ExtStr) { 2453 if (ExtStr.empty()) 2454 return; 2455 llvm::SmallVector<StringRef, 1> Exts; 2456 ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false); 2457 auto CanT = T.getCanonicalType().getTypePtr(); 2458 for (auto &I : Exts) 2459 OpenCLTypeExtMap[CanT].insert(I.str()); 2460 } 2461 2462 void Sema::setOpenCLExtensionForDecl(Decl *FD, StringRef ExtStr) { 2463 llvm::SmallVector<StringRef, 1> Exts; 2464 ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false); 2465 if (Exts.empty()) 2466 return; 2467 for (auto &I : Exts) 2468 OpenCLDeclExtMap[FD].insert(I.str()); 2469 } 2470 2471 void Sema::setCurrentOpenCLExtensionForType(QualType T) { 2472 if (CurrOpenCLExtension.empty()) 2473 return; 2474 setOpenCLExtensionForType(T, CurrOpenCLExtension); 2475 } 2476 2477 void Sema::setCurrentOpenCLExtensionForDecl(Decl *D) { 2478 if (CurrOpenCLExtension.empty()) 2479 return; 2480 setOpenCLExtensionForDecl(D, CurrOpenCLExtension); 2481 } 2482 2483 std::string Sema::getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD) { 2484 if (!OpenCLDeclExtMap.empty()) 2485 return getOpenCLExtensionsFromExtMap(FD, OpenCLDeclExtMap); 2486 2487 return ""; 2488 } 2489 2490 std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) { 2491 if (!OpenCLTypeExtMap.empty()) 2492 return getOpenCLExtensionsFromExtMap(FT, OpenCLTypeExtMap); 2493 2494 return ""; 2495 } 2496 2497 template <typename T, typename MapT> 2498 std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) { 2499 auto Loc = Map.find(FDT); 2500 return llvm::join(Loc->second, " "); 2501 } 2502 2503 bool Sema::isOpenCLDisabledDecl(Decl *FD) { 2504 auto Loc = OpenCLDeclExtMap.find(FD); 2505 if (Loc == OpenCLDeclExtMap.end()) 2506 return false; 2507 for (auto &I : Loc->second) { 2508 if (!getOpenCLOptions().isEnabled(I)) 2509 return true; 2510 } 2511 return false; 2512 } 2513 2514 template <typename T, 
typename DiagLocT, typename DiagInfoT, typename MapT> 2515 bool Sema::checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, 2516 DiagInfoT DiagInfo, MapT &Map, 2517 unsigned Selector, 2518 SourceRange SrcRange) { 2519 auto Loc = Map.find(D); 2520 if (Loc == Map.end()) 2521 return false; 2522 bool Disabled = false; 2523 for (auto &I : Loc->second) { 2524 if (I != CurrOpenCLExtension && !getOpenCLOptions().isEnabled(I)) { 2525 Diag(DiagLoc, diag::err_opencl_requires_extension) << Selector << DiagInfo 2526 << I << SrcRange; 2527 Disabled = true; 2528 } 2529 } 2530 return Disabled; 2531 } 2532 2533 bool Sema::checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType QT) { 2534 // Check extensions for declared types. 2535 Decl *Decl = nullptr; 2536 if (auto TypedefT = dyn_cast<TypedefType>(QT.getTypePtr())) 2537 Decl = TypedefT->getDecl(); 2538 if (auto TagT = dyn_cast<TagType>(QT.getCanonicalType().getTypePtr())) 2539 Decl = TagT->getDecl(); 2540 auto Loc = DS.getTypeSpecTypeLoc(); 2541 2542 // Check extensions for vector types. 2543 // e.g. double4 is not allowed when cl_khr_fp64 is absent. 2544 if (QT->isExtVectorType()) { 2545 auto TypePtr = QT->castAs<ExtVectorType>()->getElementType().getTypePtr(); 2546 return checkOpenCLDisabledTypeOrDecl(TypePtr, Loc, QT, OpenCLTypeExtMap); 2547 } 2548 2549 if (checkOpenCLDisabledTypeOrDecl(Decl, Loc, QT, OpenCLDeclExtMap)) 2550 return true; 2551 2552 // Check extensions for builtin types. 2553 return checkOpenCLDisabledTypeOrDecl(QT.getCanonicalType().getTypePtr(), Loc, 2554 QT, OpenCLTypeExtMap); 2555 } 2556 2557 bool Sema::checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E) { 2558 IdentifierInfo *FnName = D.getIdentifier(); 2559 return checkOpenCLDisabledTypeOrDecl(&D, E.getBeginLoc(), FnName, 2560 OpenCLDeclExtMap, 1, D.getSourceRange()); 2561 } 2562
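// Illustrative example of the checks above: assuming the element type
// 'double' has been registered against cl_khr_fp64 via
// setOpenCLExtensionForType(), then for OpenCL source such as
//
//   double4 v;  // hypothetical user code
//
// checkOpenCLDisabledTypeDeclSpec() looks up the vector's element type in
// OpenCLTypeExtMap and, if the extension is not currently enabled, emits
// err_opencl_requires_extension.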