xref: /freebsd/contrib/llvm-project/clang/lib/Sema/Sema.cpp (revision cb14a3fe5122c879eae1fb480ed7ce82a699ddb6)
1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the actions class which performs semantic analysis and
10 // builds an AST out of a parse stream.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "UsedDeclVisitor.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/ASTDiagnostic.h"
17 #include "clang/AST/Decl.h"
18 #include "clang/AST/DeclCXX.h"
19 #include "clang/AST/DeclFriend.h"
20 #include "clang/AST/DeclObjC.h"
21 #include "clang/AST/Expr.h"
22 #include "clang/AST/ExprCXX.h"
23 #include "clang/AST/PrettyDeclStackTrace.h"
24 #include "clang/AST/StmtCXX.h"
25 #include "clang/Basic/DarwinSDKInfo.h"
26 #include "clang/Basic/DiagnosticOptions.h"
27 #include "clang/Basic/PartialDiagnostic.h"
28 #include "clang/Basic/SourceManager.h"
29 #include "clang/Basic/Stack.h"
30 #include "clang/Basic/TargetInfo.h"
31 #include "clang/Lex/HeaderSearch.h"
32 #include "clang/Lex/HeaderSearchOptions.h"
33 #include "clang/Lex/Preprocessor.h"
34 #include "clang/Sema/CXXFieldCollector.h"
35 #include "clang/Sema/DelayedDiagnostic.h"
36 #include "clang/Sema/EnterExpressionEvaluationContext.h"
37 #include "clang/Sema/ExternalSemaSource.h"
38 #include "clang/Sema/Initialization.h"
39 #include "clang/Sema/MultiplexExternalSemaSource.h"
40 #include "clang/Sema/ObjCMethodList.h"
41 #include "clang/Sema/RISCVIntrinsicManager.h"
42 #include "clang/Sema/Scope.h"
43 #include "clang/Sema/ScopeInfo.h"
44 #include "clang/Sema/SemaConsumer.h"
45 #include "clang/Sema/SemaInternal.h"
46 #include "clang/Sema/TemplateDeduction.h"
47 #include "clang/Sema/TemplateInstCallback.h"
48 #include "clang/Sema/TypoCorrection.h"
49 #include "llvm/ADT/DenseMap.h"
50 #include "llvm/ADT/STLExtras.h"
51 #include "llvm/ADT/SmallPtrSet.h"
52 #include "llvm/Support/TimeProfiler.h"
53 #include <optional>
54 
55 using namespace clang;
56 using namespace sema;
57 
58 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
59   return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
60 }
61 
62 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
63 
64 DarwinSDKInfo *
65 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
66                                               StringRef Platform) {
67   auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
68   if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
69     Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
70         << Platform;
71     WarnedDarwinSDKInfoMissing = true;
72   }
73   return SDKInfo;
74 }
75 
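/// Lazily parse the Darwin SDK information (SDKSettings.json under the
/// header-search sysroot) the first time it is requested and cache the
/// result; a parse failure is cached as well, so the lookup is attempted at
/// most once per compilation.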
76 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
77   if (CachedDarwinSDKInfo)
78     return CachedDarwinSDKInfo->get();
79   auto SDKInfo = parseDarwinSDKInfo(
80       PP.getFileManager().getVirtualFileSystem(),
81       PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
82   if (SDKInfo && *SDKInfo) {
83     CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
84     return CachedDarwinSDKInfo->get();
85   }
86   if (!SDKInfo)
87     llvm::consumeError(SDKInfo.takeError());
88   CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
89   return nullptr;
90 }
91 
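/// Invent a printable name for the template parameter synthesized for an
/// abbreviated function template: "auto:<N>" for the N-th unnamed 'auto'
/// parameter, or "<name>:auto" when the declarator that introduced the
/// placeholder has a name.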
92 IdentifierInfo *
93 Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
94                                                  unsigned int Index) {
95   std::string InventedName;
96   llvm::raw_string_ostream OS(InventedName);
97 
98   if (!ParamName)
99     OS << "auto:" << Index + 1;
100   else
101     OS << ParamName->getName() << ":auto";
102 
103   OS.flush();
104   return &Context.Idents.get(OS.str());
105 }
106 
107 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
108                                        const Preprocessor &PP) {
109   PrintingPolicy Policy = Context.getPrintingPolicy();
110   // In diagnostics, we print _Bool as bool if the latter is defined as the
111   // former.
112   Policy.Bool = Context.getLangOpts().Bool;
113   if (!Policy.Bool) {
114     if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
115       Policy.Bool = BoolMacro->isObjectLike() &&
116                     BoolMacro->getNumTokens() == 1 &&
117                     BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
118     }
119   }
120 
121   // Shorten the data output if needed
122   Policy.EntireContentsOfLargeArray = false;
123 
124   return Policy;
125 }
126 
127 void Sema::ActOnTranslationUnitScope(Scope *S) {
128   TUScope = S;
129   PushDeclContext(S, Context.getTranslationUnitDecl());
130 }
131 
132 namespace clang {
133 namespace sema {
134 
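/// Preprocessor callbacks installed by Sema (but owned by the Preprocessor):
/// they track #include entry and exit so that non-default '#pragma pack' /
/// '#pragma align' state can be diagnosed at include boundaries, and they
/// open and close "Source" ranges for -ftime-trace profiling.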
135 class SemaPPCallbacks : public PPCallbacks {
136   Sema *S = nullptr;
137   llvm::SmallVector<SourceLocation, 8> IncludeStack;
138 
139 public:
140   void set(Sema &S) { this->S = &S; }
141 
142   void reset() { S = nullptr; }
143 
144   void FileChanged(SourceLocation Loc, FileChangeReason Reason,
145                    SrcMgr::CharacteristicKind FileType,
146                    FileID PrevFID) override {
147     if (!S)
148       return;
149     switch (Reason) {
150     case EnterFile: {
151       SourceManager &SM = S->getSourceManager();
152       SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
153       if (IncludeLoc.isValid()) {
154         if (llvm::timeTraceProfilerEnabled()) {
155           OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
156           llvm::timeTraceProfilerBegin("Source", FE ? FE->getName()
157                                                     : StringRef("<unknown>"));
158         }
159 
160         IncludeStack.push_back(IncludeLoc);
161         S->DiagnoseNonDefaultPragmaAlignPack(
162             Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
163             IncludeLoc);
164       }
165       break;
166     }
167     case ExitFile:
168       if (!IncludeStack.empty()) {
169         if (llvm::timeTraceProfilerEnabled())
170           llvm::timeTraceProfilerEnd();
171 
172         S->DiagnoseNonDefaultPragmaAlignPack(
173             Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
174             IncludeStack.pop_back_val());
175       }
176       break;
177     default:
178       break;
179     }
180   }
181 };
182 
183 } // end namespace sema
184 } // end namespace clang
185 
186 const unsigned Sema::MaxAlignmentExponent;
187 const uint64_t Sema::MaximumAlignment;
188 
189 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
190            TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
191     : ExternalSource(nullptr), CurFPFeatures(pp.getLangOpts()),
192       LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
193       Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
194       APINotes(SourceMgr, LangOpts), CollectStats(false),
195       CodeCompleter(CodeCompleter), CurContext(nullptr),
196       OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
197       MSPointerToMemberRepresentationMethod(
198           LangOpts.getMSPointerToMemberRepresentationMethod()),
199       VtorDispStack(LangOpts.getVtorDispMode()),
200       AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
201       DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
202       CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
203       FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
204       VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
205       IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
206       LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
207       StdInitializerList(nullptr), StdCoroutineTraitsCache(nullptr),
208       CXXTypeInfoDecl(nullptr), StdSourceLocationImplDecl(nullptr),
209       NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
210       StringWithUTF8StringMethod(nullptr),
211       ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
212       ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
213       DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
214       TUKind(TUKind), NumSFINAEErrors(0),
215       FullyCheckedComparisonCategories(
216           static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
217       SatisfactionCache(Context), AccessCheckingSFINAE(false),
218       InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
219       ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
220       DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
221       ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
222       CurScope(nullptr), Ident_super(nullptr) {
223   assert(pp.TUKind == TUKind);
224   TUScope = nullptr;
225 
226   LoadedExternalKnownNamespaces = false;
227   for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
228     NSNumberLiteralMethods[I] = nullptr;
229 
230   if (getLangOpts().ObjC)
231     NSAPIObj.reset(new NSAPI(Context));
232 
233   if (getLangOpts().CPlusPlus)
234     FieldCollector.reset(new CXXFieldCollector());
235 
236   // Tell diagnostics how to render things from the AST library.
237   Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
238 
239   // This evaluation context exists to ensure that there's always at least one
240   // valid evaluation context available. It is never removed from the
241   // evaluation stack.
242   ExprEvalContexts.emplace_back(
243       ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
244       nullptr, ExpressionEvaluationContextRecord::EK_Other);
245 
246   // Initialization of data sharing attributes stack for OpenMP
247   InitDataSharingAttributesStack();
248 
249   std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
250       std::make_unique<sema::SemaPPCallbacks>();
251   SemaPPCallbackHandler = Callbacks.get();
252   PP.addPPCallbacks(std::move(Callbacks));
253   SemaPPCallbackHandler->set(*this);
254 
255   CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
256 }
257 
258 // Anchor Sema's type info to this TU.
259 void Sema::anchor() {}
260 
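/// Create an implicit typedef named \p Name for the type \p T at
/// translation-unit scope, unless the identifier is already bound to a
/// declaration (for example, one loaded from a PCH or AST file).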
261 void Sema::addImplicitTypedef(StringRef Name, QualType T) {
262   DeclarationName DN = &Context.Idents.get(Name);
263   if (IdResolver.begin(DN) == IdResolver.end())
264     PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
265 }
266 
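/// Finish setting up state that depends on the ASTContext and any attached
/// consumers or external sources: notify the SemaConsumer and
/// ExternalSemaSource, then inject the predefined declarations
/// (__int128_t/__uint128_t, the Objective-C SEL/id/Class/Protocol types, the
/// OpenCL, AArch64 SVE, PPC MMA/VSX, RISC-V vector and WebAssembly reference
/// types, and the __builtin_va_list family) into the translation-unit scope
/// whenever their names are not already in use.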
267 void Sema::Initialize() {
268   if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
269     SC->InitializeSema(*this);
270 
271   // Tell the external Sema source about this Sema object.
272   if (ExternalSemaSource *ExternalSema
273       = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
274     ExternalSema->InitializeSema(*this);
275 
276   // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
277   // will not be able to merge any duplicate __va_list_tag decls correctly.
278   VAListTagName = PP.getIdentifierInfo("__va_list_tag");
279 
280   if (!TUScope)
281     return;
282 
283   // Initialize predefined 128-bit integer types, if needed.
284   if (Context.getTargetInfo().hasInt128Type() ||
285       (Context.getAuxTargetInfo() &&
286        Context.getAuxTargetInfo()->hasInt128Type())) {
287     // If either of the 128-bit integer types is unavailable to name lookup,
288     // define them now.
289     DeclarationName Int128 = &Context.Idents.get("__int128_t");
290     if (IdResolver.begin(Int128) == IdResolver.end())
291       PushOnScopeChains(Context.getInt128Decl(), TUScope);
292 
293     DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
294     if (IdResolver.begin(UInt128) == IdResolver.end())
295       PushOnScopeChains(Context.getUInt128Decl(), TUScope);
296   }
297 
298 
299   // Initialize predefined Objective-C types:
300   if (getLangOpts().ObjC) {
301     // If 'SEL' does not yet refer to any declarations, make it refer to the
302     // predefined 'SEL'.
303     DeclarationName SEL = &Context.Idents.get("SEL");
304     if (IdResolver.begin(SEL) == IdResolver.end())
305       PushOnScopeChains(Context.getObjCSelDecl(), TUScope);
306 
307     // If 'id' does not yet refer to any declarations, make it refer to the
308     // predefined 'id'.
309     DeclarationName Id = &Context.Idents.get("id");
310     if (IdResolver.begin(Id) == IdResolver.end())
311       PushOnScopeChains(Context.getObjCIdDecl(), TUScope);
312 
313     // Create the built-in typedef for 'Class'.
314     DeclarationName Class = &Context.Idents.get("Class");
315     if (IdResolver.begin(Class) == IdResolver.end())
316       PushOnScopeChains(Context.getObjCClassDecl(), TUScope);
317 
318     // Create the built-in forward declaration for 'Protocol'.
319     DeclarationName Protocol = &Context.Idents.get("Protocol");
320     if (IdResolver.begin(Protocol) == IdResolver.end())
321       PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
322   }
323 
324   // Create the internal type for the *StringMakeConstantString builtins.
325   DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
326   if (IdResolver.begin(ConstantString) == IdResolver.end())
327     PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);
328 
329   // Initialize Microsoft "predefined C++ types".
330   if (getLangOpts().MSVCCompat) {
331     if (getLangOpts().CPlusPlus &&
332         IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
333       PushOnScopeChains(
334           Context.buildImplicitRecord("type_info", TagTypeKind::Class),
335           TUScope);
336 
337     addImplicitTypedef("size_t", Context.getSizeType());
338   }
339 
340   // Initialize predefined OpenCL types and supported extensions and (optional)
341   // core features.
342   if (getLangOpts().OpenCL) {
343     getOpenCLOptions().addSupport(
344         Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
345     addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
346     addImplicitTypedef("event_t", Context.OCLEventTy);
347     auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
348     if (OCLCompatibleVersion >= 200) {
349       if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
350         addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
351         addImplicitTypedef("queue_t", Context.OCLQueueTy);
352       }
353       if (getLangOpts().OpenCLPipes)
354         addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
355       addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
356       addImplicitTypedef("atomic_uint",
357                          Context.getAtomicType(Context.UnsignedIntTy));
358       addImplicitTypedef("atomic_float",
359                          Context.getAtomicType(Context.FloatTy));
360       // OpenCL C v2.0 s6.13.11.6 requires that atomic_flag be implemented as a
361       // 32-bit integer, and OpenCL C v2.0 s6.1.1 specifies that int is always 32 bits wide.
362       addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
363 
364 
365       // OpenCL v2.0 s6.13.11.6:
366       // - The atomic_long and atomic_ulong types are supported if the
367       //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
368       //   extensions are supported.
369       // - The atomic_double type is only supported if double precision
370       //   is supported and the cl_khr_int64_base_atomics and
371       //   cl_khr_int64_extended_atomics extensions are supported.
372       // - If the device address space is 64-bits, the data types
373       //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
374       //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
375       //   cl_khr_int64_extended_atomics extensions are supported.
376 
377       auto AddPointerSizeDependentTypes = [&]() {
378         auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
379         auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
380         auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
381         auto AtomicPtrDiffT =
382             Context.getAtomicType(Context.getPointerDiffType());
383         addImplicitTypedef("atomic_size_t", AtomicSizeT);
384         addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
385         addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
386         addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
387       };
388 
389       if (Context.getTypeSize(Context.getSizeType()) == 32) {
390         AddPointerSizeDependentTypes();
391       }
392 
393       if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
394         auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
395         addImplicitTypedef("atomic_half", AtomicHalfT);
396       }
397 
398       std::vector<QualType> Atomic64BitTypes;
399       if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
400                                          getLangOpts()) &&
401           getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
402                                          getLangOpts())) {
403         if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
404           auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
405           addImplicitTypedef("atomic_double", AtomicDoubleT);
406           Atomic64BitTypes.push_back(AtomicDoubleT);
407         }
408         auto AtomicLongT = Context.getAtomicType(Context.LongTy);
409         auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
410         addImplicitTypedef("atomic_long", AtomicLongT);
411         addImplicitTypedef("atomic_ulong", AtomicULongT);
412 
413 
414         if (Context.getTypeSize(Context.getSizeType()) == 64) {
415           AddPointerSizeDependentTypes();
416         }
417       }
418     }
419 
420 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
421   if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) {                   \
422     addImplicitTypedef(#ExtType, Context.Id##Ty);                              \
423   }
424 #include "clang/Basic/OpenCLExtensionTypes.def"
425   }
426 
427   if (Context.getTargetInfo().hasAArch64SVETypes()) {
428 #define SVE_TYPE(Name, Id, SingletonId) \
429     addImplicitTypedef(Name, Context.SingletonId);
430 #include "clang/Basic/AArch64SVEACLETypes.def"
431   }
432 
433   if (Context.getTargetInfo().getTriple().isPPC64()) {
434 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
435       addImplicitTypedef(#Name, Context.Id##Ty);
436 #include "clang/Basic/PPCTypes.def"
437 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
438     addImplicitTypedef(#Name, Context.Id##Ty);
439 #include "clang/Basic/PPCTypes.def"
440   }
441 
442   if (Context.getTargetInfo().hasRISCVVTypes()) {
443 #define RVV_TYPE(Name, Id, SingletonId)                                        \
444   addImplicitTypedef(Name, Context.SingletonId);
445 #include "clang/Basic/RISCVVTypes.def"
446   }
447 
448   if (Context.getTargetInfo().getTriple().isWasm() &&
449       Context.getTargetInfo().hasFeature("reference-types")) {
450 #define WASM_TYPE(Name, Id, SingletonId)                                       \
451   addImplicitTypedef(Name, Context.SingletonId);
452 #include "clang/Basic/WebAssemblyReferenceTypes.def"
453   }
454 
455   if (Context.getTargetInfo().hasBuiltinMSVaList()) {
456     DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
457     if (IdResolver.begin(MSVaList) == IdResolver.end())
458       PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
459   }
460 
461   DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
462   if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
463     PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
464 }
465 
466 Sema::~Sema() {
467   assert(InstantiatingSpecializations.empty() &&
468          "failed to clean up an InstantiatingTemplate?");
469 
470   if (VisContext) FreeVisContext();
471 
472   // Kill all the active scopes.
473   for (sema::FunctionScopeInfo *FSI : FunctionScopes)
474     delete FSI;
475 
476   // Tell the SemaConsumer to forget about us; we're going out of scope.
477   if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
478     SC->ForgetSema();
479 
480   // Detach from the external Sema source.
481   if (ExternalSemaSource *ExternalSema
482         = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
483     ExternalSema->ForgetSema();
484 
485   // Delete cached satisfactions.
486   std::vector<ConstraintSatisfaction *> Satisfactions;
487   Satisfactions.reserve(SatisfactionCache.size());
488   for (auto &Node : SatisfactionCache)
489     Satisfactions.push_back(&Node);
490   for (auto *Node : Satisfactions)
491     delete Node;
492 
493   threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);
494 
495   // Destroys data sharing attributes stack for OpenMP
496   DestroyDataSharingAttributesStack();
497 
498   // Detach from the PP callback handler which outlives Sema since it's owned
499   // by the preprocessor.
500   SemaPPCallbackHandler->reset();
501 }
502 
503 void Sema::warnStackExhausted(SourceLocation Loc) {
504   // Only warn about this once.
505   if (!WarnedStackExhausted) {
506     Diag(Loc, diag::warn_stack_exhausted);
507     WarnedStackExhausted = true;
508   }
509 }
510 
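/// Run \p Fn, first emitting warn_stack_exhausted (once) if the remaining
/// stack space is nearly exhausted; the clang::runWithSufficientStackSpace
/// helper may then execute \p Fn on a separate thread with a fresh stack
/// where that is supported.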
511 void Sema::runWithSufficientStackSpace(SourceLocation Loc,
512                                        llvm::function_ref<void()> Fn) {
513   clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
514 }
515 
516 /// makeUnavailableInSystemHeader - There is an error in the current
517 /// context.  If we're still in a system header, and we can plausibly
518 /// make the relevant declaration unavailable instead of erroring, do
519 /// so and return true.
520 bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
521                                       UnavailableAttr::ImplicitReason reason) {
522   // If we're not in a function, it's an error.
523   FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
524   if (!fn) return false;
525 
526   // If we're in template instantiation, it's an error.
527   if (inTemplateInstantiation())
528     return false;
529 
530   // If that function's not in a system header, it's an error.
531   if (!Context.getSourceManager().isInSystemHeader(loc))
532     return false;
533 
534   // If the function is already unavailable, it's not an error.
535   if (fn->hasAttr<UnavailableAttr>()) return true;
536 
537   fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
538   return true;
539 }
540 
541 ASTMutationListener *Sema::getASTMutationListener() const {
542   return getASTConsumer().GetASTMutationListener();
543 }
544 
545 /// Registers an external source. If an external source already exists,
546 /// creates a multiplex external source and appends to it.
547 ///
548 /// \param[in] E - A non-null external sema source.
549 ///
550 void Sema::addExternalSource(ExternalSemaSource *E) {
551   assert(E && "Cannot use with NULL ptr");
552 
553   if (!ExternalSource) {
554     ExternalSource = E;
555     return;
556   }
557 
558   if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource))
559     Ex->AddSource(E);
560   else
561     ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
562 }
563 
564 /// Print out statistics about the semantic analysis.
565 void Sema::PrintStats() const {
566   llvm::errs() << "\n*** Semantic Analysis Stats:\n";
567   llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
568 
569   BumpAlloc.PrintStats();
570   AnalysisWarnings.PrintStats();
571 }
572 
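/// Emit -Wnullable-to-nonnull-conversion when a value of a _Nullable (or
/// _Nullable_result) pointer type is implicitly converted to a _Nonnull
/// pointer type, e.g. passing an 'int * _Nullable' where an
/// 'int * _Nonnull' is expected.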
573 void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
574                                                QualType SrcType,
575                                                SourceLocation Loc) {
576   std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
577   if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
578                            *ExprNullability != NullabilityKind::NullableResult))
579     return;
580 
581   std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
582   if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
583     return;
584 
585   Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
586 }
587 
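/// Diagnose -Wzero-as-null-pointer-constant: in C++11 and later, warn when a
/// literal 0 (but not 'nullptr' or GNU '__null') is implicitly converted to a
/// pointer or pointer-to-member, offering a fix-it that replaces it with
/// 'nullptr'.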
588 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
589   // nullptr only exists from C++11 on, so don't warn on its absence earlier.
590   if (!getLangOpts().CPlusPlus11)
591     return;
592 
593   if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
594     return;
595 
596   const Expr *EStripped = E->IgnoreParenImpCasts();
597   if (EStripped->getType()->isNullPtrType())
598     return;
599   if (isa<GNUNullExpr>(EStripped))
600     return;
601 
602   if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
603                       E->getBeginLoc()))
604     return;
605 
606   // Don't diagnose the conversion from a 0 literal to a null pointer argument
607   // in a synthesized call to operator<=>.
608   if (!CodeSynthesisContexts.empty() &&
609       CodeSynthesisContexts.back().Kind ==
610           CodeSynthesisContext::RewritingOperatorAsSpaceship)
611     return;
612 
613   // Ignore null pointers in defaulted comparison operators.
614   FunctionDecl *FD = getCurFunctionDecl();
615   if (FD && FD->isDefaulted()) {
616     return;
617   }
618 
619   // If it is a macro from a system header, and if the macro name is not "NULL",
620   // do not warn.
621   // Note that uses of "NULL" will be ignored above on systems that define it
622   // as __null.
623   SourceLocation MaybeMacroLoc = E->getBeginLoc();
624   if (Diags.getSuppressSystemWarnings() &&
625       SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
626       !findMacroSpelling(MaybeMacroLoc, "NULL"))
627     return;
628 
629   Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
630       << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
631 }
632 
633 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
634 /// If there is already an implicit cast, merge into the existing one.
635 /// The result is of the given category.
636 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
637                                    CastKind Kind, ExprValueKind VK,
638                                    const CXXCastPath *BasePath,
639                                    CheckedConversionKind CCK) {
640 #ifndef NDEBUG
641   if (VK == VK_PRValue && !E->isPRValue()) {
642     switch (Kind) {
643     default:
644       llvm_unreachable(
645           ("can't implicitly cast glvalue to prvalue with this cast "
646            "kind: " +
647            std::string(CastExpr::getCastKindName(Kind)))
648               .c_str());
649     case CK_Dependent:
650     case CK_LValueToRValue:
651     case CK_ArrayToPointerDecay:
652     case CK_FunctionToPointerDecay:
653     case CK_ToVoid:
654     case CK_NonAtomicToAtomic:
655       break;
656     }
657   }
658   assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
659          "can't cast prvalue to glvalue");
660 #endif
661 
662   diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
663   diagnoseZeroToNullptrConversion(Kind, E);
664 
665   QualType ExprTy = Context.getCanonicalType(E->getType());
666   QualType TypeTy = Context.getCanonicalType(Ty);
667 
668   if (ExprTy == TypeTy)
669     return E;
670 
671   if (Kind == CK_ArrayToPointerDecay) {
672     // C++1z [conv.array]: The temporary materialization conversion is applied.
673     // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
674     if (getLangOpts().CPlusPlus && E->isPRValue()) {
675       // The temporary is an lvalue in C++98 and an xvalue otherwise.
676       ExprResult Materialized = CreateMaterializeTemporaryExpr(
677           E->getType(), E, !getLangOpts().CPlusPlus11);
678       if (Materialized.isInvalid())
679         return ExprError();
680       E = Materialized.get();
681     }
682     // C17 6.7.1p6 footnote 124: The implementation can treat any register
683     // declaration simply as an auto declaration. However, whether or not
684     // addressable storage is actually used, the address of any part of an
685     // object declared with storage-class specifier register cannot be
686     // computed, either explicitly (by use of the unary & operator as discussed
687     // in 6.5.3.2) or implicitly (by converting an array name to a pointer as
688     // discussed in 6.3.2.1). Thus, the only operator that can be applied to an
689     // array declared with storage-class specifier register is sizeof.
690     if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
691       if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
692         if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
693           if (VD->getStorageClass() == SC_Register) {
694             Diag(E->getExprLoc(), diag::err_typecheck_address_of)
695                 << /*register variable*/ 3 << E->getSourceRange();
696             return ExprError();
697           }
698         }
699       }
700     }
701   }
702 
703   if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
704     if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
705       ImpCast->setType(Ty);
706       ImpCast->setValueKind(VK);
707       return E;
708     }
709   }
710 
711   return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
712                                   CurFPFeatureOverrides());
713 }
714 
715 /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
716 /// to the conversion from scalar type ScalarTy to the Boolean type.
717 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
718   switch (ScalarTy->getScalarTypeKind()) {
719   case Type::STK_Bool: return CK_NoOp;
720   case Type::STK_CPointer: return CK_PointerToBoolean;
721   case Type::STK_BlockPointer: return CK_PointerToBoolean;
722   case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
723   case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
724   case Type::STK_Integral: return CK_IntegralToBoolean;
725   case Type::STK_Floating: return CK_FloatingToBoolean;
726   case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
727   case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
728   case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
729   }
730   llvm_unreachable("unknown scalar type kind");
731 }
732 
733 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
734 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
735   if (D->getMostRecentDecl()->isUsed())
736     return true;
737 
738   if (D->isExternallyVisible())
739     return true;
740 
741   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
742     // If this is a function template and none of its specializations is used,
743     // we should warn.
744     if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
745       for (const auto *Spec : Template->specializations())
746         if (ShouldRemoveFromUnused(SemaRef, Spec))
747           return true;
748 
749     // UnusedFileScopedDecls stores the first declaration.
750     // The declaration may have become a definition, so check again.
751     const FunctionDecl *DeclToCheck;
752     if (FD->hasBody(DeclToCheck))
753       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
754 
755     // Later redecls may add new information resulting in not having to warn,
756     // so check again.
757     DeclToCheck = FD->getMostRecentDecl();
758     if (DeclToCheck != FD)
759       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
760   }
761 
762   if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
763     // If a variable usable in constant expressions is referenced,
764     // don't warn if it isn't used: if the value of a variable is required
765     // for the computation of a constant expression, it doesn't make sense to
766     // warn even if the variable isn't odr-used.  (isReferenced doesn't
767     // precisely reflect that, but it's a decent approximation.)
768     if (VD->isReferenced() &&
769         VD->mightBeUsableInConstantExpressions(SemaRef->Context))
770       return true;
771 
772     if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
773       // If this is a variable template and none of its specializations is used,
774       // we should warn.
775       for (const auto *Spec : Template->specializations())
776         if (ShouldRemoveFromUnused(SemaRef, Spec))
777           return true;
778 
779     // UnusedFileScopedDecls stores the first declaration.
780     // The declaration may have become a definition, so check again.
781     const VarDecl *DeclToCheck = VD->getDefinition();
782     if (DeclToCheck)
783       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
784 
785     // Later redecls may add new information resulting in not having to warn,
786     // so check again.
787     DeclToCheck = VD->getMostRecentDecl();
788     if (DeclToCheck != VD)
789       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
790   }
791 
792   return false;
793 }
794 
795 static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
796   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
797     return FD->isExternC();
798   return cast<VarDecl>(ND)->isExternC();
799 }
800 
801 /// Determine whether ND is an external-linkage function or variable whose
802 /// type has no linkage.
803 bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
804   // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
805   // because we also want to catch the case where its type has VisibleNoLinkage,
806   // which does not affect the linkage of VD.
807   return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
808          !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
809          !isFunctionOrVarDeclExternC(VD);
810 }
811 
812 /// Obtains a sorted list of functions and variables that are undefined but
813 /// ODR-used.
814 void Sema::getUndefinedButUsed(
815     SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
816   for (const auto &UndefinedUse : UndefinedButUsed) {
817     NamedDecl *ND = UndefinedUse.first;
818 
819     // Ignore declarations that have become invalid.
820     if (ND->isInvalidDecl()) continue;
821 
822     // __attribute__((weakref)) is basically a definition.
823     if (ND->hasAttr<WeakRefAttr>()) continue;
824 
825     if (isa<CXXDeductionGuideDecl>(ND))
826       continue;
827 
828     if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
829       // An exported function will always be emitted when defined, so even if
830       // the function is inline, it doesn't have to be emitted in this TU. An
831       // imported function implies that it has been exported somewhere else.
832       continue;
833     }
834 
835     if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
836       if (FD->isDefined())
837         continue;
838       if (FD->isExternallyVisible() &&
839           !isExternalWithNoLinkageType(FD) &&
840           !FD->getMostRecentDecl()->isInlined() &&
841           !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
842         continue;
843       if (FD->getBuiltinID())
844         continue;
845     } else {
846       const auto *VD = cast<VarDecl>(ND);
847       if (VD->hasDefinition() != VarDecl::DeclarationOnly)
848         continue;
849       if (VD->isExternallyVisible() &&
850           !isExternalWithNoLinkageType(VD) &&
851           !VD->getMostRecentDecl()->isInline() &&
852           !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
853         continue;
854 
855       // Skip VarDecls that lack formal definitions but which we know are in
856       // fact defined somewhere.
857       if (VD->isKnownToBeDefined())
858         continue;
859     }
860 
861     Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
862   }
863 }
864 
865 /// checkUndefinedButUsed - Check for undefined objects with internal linkage
866 /// or that are inline.
867 static void checkUndefinedButUsed(Sema &S) {
868   if (S.UndefinedButUsed.empty()) return;
869 
870   // Collect all the still-undefined entities with internal linkage.
871   SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
872   S.getUndefinedButUsed(Undefined);
873   S.UndefinedButUsed.clear();
874   if (Undefined.empty()) return;
875 
876   for (const auto &Undef : Undefined) {
877     ValueDecl *VD = cast<ValueDecl>(Undef.first);
878     SourceLocation UseLoc = Undef.second;
879 
880     if (S.isExternalWithNoLinkageType(VD)) {
881       // C++ [basic.link]p8:
882       //   A type without linkage shall not be used as the type of a variable
883       //   or function with external linkage unless
884       //    -- the entity has C language linkage
885       //    -- the entity is not odr-used or is defined in the same TU
886       //
887       // As an extension, accept this in cases where the type is externally
888       // visible, since the function or variable actually can be defined in
889       // another translation unit in that case.
890       S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
891                                     ? diag::ext_undefined_internal_type
892                                     : diag::err_undefined_internal_type)
893         << isa<VarDecl>(VD) << VD;
894     } else if (!VD->isExternallyVisible()) {
895       // FIXME: We can promote this to an error. The function or variable can't
896       // be defined anywhere else, so the program must necessarily violate the
897       // one definition rule.
898       bool IsImplicitBase = false;
899       if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
900         auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
901         if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
902                           llvm::omp::TraitProperty::
903                               implementation_extension_disable_implicit_base)) {
904           const auto *Func = cast<FunctionDecl>(
905               cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
906           IsImplicitBase = BaseD->isImplicit() &&
907                            Func->getIdentifier()->isMangledOpenMPVariantName();
908         }
909       }
910       if (!S.getLangOpts().OpenMP || !IsImplicitBase)
911         S.Diag(VD->getLocation(), diag::warn_undefined_internal)
912             << isa<VarDecl>(VD) << VD;
913     } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
914       (void)FD;
915       assert(FD->getMostRecentDecl()->isInlined() &&
916              "used object requires definition but isn't inline or internal?");
917       // FIXME: This is ill-formed; we should reject.
918       S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
919     } else {
920       assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
921              "used var requires definition but isn't inline or internal?");
922       S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
923     }
924     if (UseLoc.isValid())
925       S.Diag(UseLoc, diag::note_used_here);
926   }
927 }
928 
929 void Sema::LoadExternalWeakUndeclaredIdentifiers() {
930   if (!ExternalSource)
931     return;
932 
933   SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
934   ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
935   for (auto &WeakID : WeakIDs)
936     (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
937 }
938 
939 
940 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
941 
942 /// Returns true if all methods and nested classes of the given
943 /// CXXRecordDecl are defined in this translation unit.
944 ///
945 /// Should only be called from ActOnEndOfTranslationUnit so that all
946 /// definitions are actually read.
947 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
948                                             RecordCompleteMap &MNCComplete) {
949   RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
950   if (Cache != MNCComplete.end())
951     return Cache->second;
952   if (!RD->isCompleteDefinition())
953     return false;
954   bool Complete = true;
955   for (DeclContext::decl_iterator I = RD->decls_begin(),
956                                   E = RD->decls_end();
957        I != E && Complete; ++I) {
958     if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
959       Complete = M->isDefined() || M->isDefaulted() ||
960                  (M->isPureVirtual() && !isa<CXXDestructorDecl>(M));
961     else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
962       // If the template function is marked as late template parsed at this
963       // point, it has not been instantiated and therefore we have not
964       // performed semantic analysis on it yet, so we cannot know if the type
965       // can be considered complete.
966       Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
967                   F->getTemplatedDecl()->isDefined();
968     else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
969       if (R->isInjectedClassName())
970         continue;
971       if (R->hasDefinition())
972         Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
973                                                    MNCComplete);
974       else
975         Complete = false;
976     }
977   }
978   MNCComplete[RD] = Complete;
979   return Complete;
980 }
981 
982 /// Returns true if the given CXXRecordDecl is fully defined in this
983 /// translation unit, i.e. all methods are defined or pure virtual and all
984 /// friends, friend functions and nested classes are fully defined in this
985 /// translation unit.
986 ///
987 /// Should only be called from ActOnEndOfTranslationUnit so that all
988 /// definitions are actually read.
989 static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
990                                  RecordCompleteMap &RecordsComplete,
991                                  RecordCompleteMap &MNCComplete) {
992   RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
993   if (Cache != RecordsComplete.end())
994     return Cache->second;
995   bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
996   for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
997                                       E = RD->friend_end();
998        I != E && Complete; ++I) {
999     // Check if friend classes and methods are complete.
1000     if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
1001       // Friend classes are available as the TypeSourceInfo of the FriendDecl.
1002       if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
1003         Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
1004       else
1005         Complete = false;
1006     } else {
1007       // Friend functions are available through the NamedDecl of FriendDecl.
1008       if (const FunctionDecl *FD =
1009           dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
1010         Complete = FD->isDefined();
1011       else
1012         // This is a template friend, give up.
1013         Complete = false;
1014     }
1015   }
1016   RecordsComplete[RD] = Complete;
1017   return Complete;
1018 }
1019 
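/// Emit -Wunused-local-typedef for local typedefs and alias-declarations that
/// were recorded as candidates during parsing (including ones read from an
/// external AST source) but were never referenced, then clear the candidate
/// list.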
1020 void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1021   if (ExternalSource)
1022     ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1023         UnusedLocalTypedefNameCandidates);
1024   for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1025     if (TD->isReferenced())
1026       continue;
1027     Diag(TD->getLocation(), diag::warn_unused_local_typedef)
1028         << isa<TypeAliasDecl>(TD) << TD->getDeclName();
1029   }
1030   UnusedLocalTypedefNameCandidates.clear();
1031 }
1032 
1033 /// This is called before the very first declaration in the translation unit
1034 /// is parsed. Note that the ASTContext may have already injected some
1035 /// declarations.
1036 void Sema::ActOnStartOfTranslationUnit() {
1037   if (getLangOpts().CPlusPlusModules &&
1038       getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1039     HandleStartOfHeaderUnit();
1040 }
1041 
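/// Perform the work needed at the end of a translation-unit fragment (nothing
/// is required for the global module fragment): move late-parsed template
/// instantiations onto the pending list, define used vtables, perform pending
/// implicit instantiations, emit deferred diagnostics, and report any delayed
/// typos that were never corrected.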
1042 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
1043   // No explicit actions are required at the end of the global module fragment.
1044   if (Kind == TUFragmentKind::Global)
1045     return;
1046 
1047   // Transfer late parsed template instantiations over to the pending template
1048   // instantiation list. During normal compilation, the late template parser
1049   // will be installed and instantiating these templates will succeed.
1050   //
1051   // If we are building a TU prefix for serialization, it is also safe to
1052   // transfer these over, even though they are not parsed. The end of the TU
1053   // should be outside of any eager template instantiation scope, so when this
1054   // AST is deserialized, these templates will not be parsed until the end of
1055   // the combined TU.
1056   PendingInstantiations.insert(PendingInstantiations.end(),
1057                                LateParsedInstantiations.begin(),
1058                                LateParsedInstantiations.end());
1059   LateParsedInstantiations.clear();
1060 
1061   // If DefineUsedVTables() ends up marking any virtual member functions, it
1062   // might lead to more pending template instantiations, which we then need
1063   // to instantiate.
1064   DefineUsedVTables();
1065 
1066   // C++: Perform implicit template instantiations.
1067   //
1068   // FIXME: When we perform these implicit instantiations, we do not
1069   // carefully keep track of the point of instantiation (C++ [temp.point]).
1070   // This means that name lookup that occurs within the template
1071   // instantiation will always happen at the end of the translation unit,
1072   // so it will find some names that are not required to be found. This is
1073   // valid, but we could do better by diagnosing if an instantiation uses a
1074   // name that was not visible at its first point of instantiation.
1075   if (ExternalSource) {
1076     // Load pending instantiations from the external source.
1077     SmallVector<PendingImplicitInstantiation, 4> Pending;
1078     ExternalSource->ReadPendingInstantiations(Pending);
1079     for (auto PII : Pending)
1080       if (auto Func = dyn_cast<FunctionDecl>(PII.first))
1081         Func->setInstantiationIsPending(true);
1082     PendingInstantiations.insert(PendingInstantiations.begin(),
1083                                  Pending.begin(), Pending.end());
1084   }
1085 
1086   {
1087     llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1088     PerformPendingInstantiations();
1089   }
1090 
1091   emitDeferredDiags();
1092 
1093   assert(LateParsedInstantiations.empty() &&
1094          "end of TU template instantiation should not create more "
1095          "late-parsed templates");
1096 
1097   // Report diagnostics for uncorrected delayed typos. Ideally all of them
1098   // should have been corrected by that time, but it is very hard to cover all
1099   // cases in practice.
1100   for (const auto &Typo : DelayedTypos) {
1101     // We pass an empty TypoCorrection to indicate no correction was performed.
1102     Typo.second.DiagHandler(TypoCorrection());
1103   }
1104   DelayedTypos.clear();
1105 }
1106 
1107 /// ActOnEndOfTranslationUnit - This is called at the very end of the
1108 /// translation unit when EOF is reached and all but the top-level scope is
1109 /// popped.
1110 void Sema::ActOnEndOfTranslationUnit() {
1111   assert(DelayedDiagnostics.getCurrentPool() == nullptr
1112          && "reached end of translation unit with a pool attached?");
1113 
1114   // If code completion is enabled, don't perform any end-of-translation-unit
1115   // work.
1116   if (PP.isCodeCompletionEnabled())
1117     return;
1118 
1119   // Complete translation units and modules define vtables and perform implicit
1120   // instantiations. PCH files do not.
1121   if (TUKind != TU_Prefix) {
1122     DiagnoseUseOfUnimplementedSelectors();
1123 
1124     ActOnEndOfTranslationUnitFragment(
1125         !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1126                                      Module::PrivateModuleFragment
1127             ? TUFragmentKind::Private
1128             : TUFragmentKind::Normal);
1129 
1130     if (LateTemplateParserCleanup)
1131       LateTemplateParserCleanup(OpaqueParser);
1132 
1133     CheckDelayedMemberExceptionSpecs();
1134   } else {
1135     // If we are building a TU prefix for serialization, it is safe to transfer
1136     // these over, even though they are not parsed. The end of the TU should be
1137     // outside of any eager template instantiation scope, so when this AST is
1138     // deserialized, these templates will not be parsed until the end of the
1139     // combined TU.
1140     PendingInstantiations.insert(PendingInstantiations.end(),
1141                                  LateParsedInstantiations.begin(),
1142                                  LateParsedInstantiations.end());
1143     LateParsedInstantiations.clear();
1144 
1145     if (LangOpts.PCHInstantiateTemplates) {
1146       llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1147       PerformPendingInstantiations();
1148     }
1149   }
1150 
1151   DiagnoseUnterminatedPragmaAlignPack();
1152   DiagnoseUnterminatedPragmaAttribute();
1153   DiagnoseUnterminatedOpenMPDeclareTarget();
1154 
1155   // All delayed member exception specs should be checked or we end up accepting
1156   // incompatible declarations.
1157   assert(DelayedOverridingExceptionSpecChecks.empty());
1158   assert(DelayedEquivalentExceptionSpecChecks.empty());
1159 
1160   // All dllexport classes should have been processed already.
1161   assert(DelayedDllExportClasses.empty());
1162   assert(DelayedDllExportMemberFunctions.empty());
1163 
1164   // Remove file scoped decls that turned out to be used.
1165   UnusedFileScopedDecls.erase(
1166       std::remove_if(UnusedFileScopedDecls.begin(ExternalSource.get(), true),
1167                      UnusedFileScopedDecls.end(),
1168                      [this](const DeclaratorDecl *DD) {
1169                        return ShouldRemoveFromUnused(this, DD);
1170                      }),
1171       UnusedFileScopedDecls.end());
1172 
1173   if (TUKind == TU_Prefix) {
1174     // Translation unit prefixes don't need any of the checking below.
1175     if (!PP.isIncrementalProcessingEnabled())
1176       TUScope = nullptr;
1177     return;
1178   }
1179 
1180   // Check for #pragma weak identifiers that were never declared
1181   LoadExternalWeakUndeclaredIdentifiers();
1182   for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
1183     if (WeakIDs.second.empty())
1184       continue;
1185 
1186     Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
1187                                       LookupOrdinaryName);
1188     if (PrevDecl != nullptr &&
1189         !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
1190       for (const auto &WI : WeakIDs.second)
1191         Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
1192             << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
1193     else
1194       for (const auto &WI : WeakIDs.second)
1195         Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
1196             << WeakIDs.first;
1197   }
1198 
1199   if (LangOpts.CPlusPlus11 &&
1200       !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
1201     CheckDelegatingCtorCycles();
1202 
1203   if (!Diags.hasErrorOccurred()) {
1204     if (ExternalSource)
1205       ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
1206     checkUndefinedButUsed(*this);
1207   }
1208 
1209   // A global-module-fragment is only permitted within a module unit.
1210   bool DiagnosedMissingModuleDeclaration = false;
1211   if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1212                                    Module::ExplicitGlobalModuleFragment) {
1213     Diag(ModuleScopes.back().BeginLoc,
1214          diag::err_module_declaration_missing_after_global_module_introducer);
1215     DiagnosedMissingModuleDeclaration = true;
1216   }
1217 
1218   if (TUKind == TU_Module) {
1219     // If we are building a module interface unit, we need to have seen the
1220     // module declaration by now.
1221     if (getLangOpts().getCompilingModule() ==
1222             LangOptions::CMK_ModuleInterface &&
1223         !isCurrentModulePurview() && !DiagnosedMissingModuleDeclaration) {
1224       // FIXME: Make a better guess as to where to put the module declaration.
1225       Diag(getSourceManager().getLocForStartOfFile(
1226                getSourceManager().getMainFileID()),
1227            diag::err_module_declaration_missing);
1228     }
1229 
1230     // If we are building a module, resolve all of the exported declarations
1231     // now.
1232     if (Module *CurrentModule = PP.getCurrentModule()) {
1233       ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
1234 
1235       SmallVector<Module *, 2> Stack;
1236       Stack.push_back(CurrentModule);
1237       while (!Stack.empty()) {
1238         Module *Mod = Stack.pop_back_val();
1239 
1240         // Resolve the exported declarations and conflicts.
1241         // FIXME: Actually complain, once we figure out how to teach the
1242         // diagnostic client to deal with complaints in the module map at this
1243         // point.
1244         ModMap.resolveExports(Mod, /*Complain=*/false);
1245         ModMap.resolveUses(Mod, /*Complain=*/false);
1246         ModMap.resolveConflicts(Mod, /*Complain=*/false);
1247 
1248         // Queue the submodules, so their exports will also be resolved.
1249         auto SubmodulesRange = Mod->submodules();
1250         Stack.append(SubmodulesRange.begin(), SubmodulesRange.end());
1251       }
1252     }
1253 
1254     // Now we can decide whether the modules we're building need an initializer.
1255     if (Module *CurrentModule = getCurrentModule();
1256         CurrentModule && CurrentModule->isInterfaceOrPartition()) {
1257       auto DoesModNeedInit = [this](Module *M) {
1258         if (!getASTContext().getModuleInitializers(M).empty())
1259           return true;
1260         for (auto [Exported, _] : M->Exports)
1261           if (Exported->isNamedModuleInterfaceHasInit())
1262             return true;
1263         for (Module *I : M->Imports)
1264           if (I->isNamedModuleInterfaceHasInit())
1265             return true;
1266 
1267         return false;
1268       };
1269 
1270       CurrentModule->NamedModuleHasInit =
1271           DoesModNeedInit(CurrentModule) ||
1272           llvm::any_of(CurrentModule->submodules(),
1273                        [&](auto *SubM) { return DoesModNeedInit(SubM); });
1274     }
1275 
1276     // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
1277     // modules when they are built, not every time they are used.
1278     emitAndClearUnusedLocalTypedefWarnings();
1279   }
1280 
1281   // C++ standard modules. Diagnose cases where a function is declared inline
1282   // in the module purview but has no definition before the end of the TU or
1283   // the start of a Private Module Fragment (if one is present).
1284   if (!PendingInlineFuncDecls.empty()) {
1285     for (auto *D : PendingInlineFuncDecls) {
1286       if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1287         bool DefInPMF = false;
1288         if (auto *FDD = FD->getDefinition()) {
1289           DefInPMF = FDD->getOwningModule()->isPrivateModule();
1290           if (!DefInPMF)
1291             continue;
1292         }
1293         Diag(FD->getLocation(), diag::err_export_inline_not_defined)
1294             << DefInPMF;
1295         // If we have a PMF, it should be at the end of the ModuleScopes.
1296         if (DefInPMF &&
1297             ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
1298           Diag(ModuleScopes.back().BeginLoc,
1299                diag::note_private_module_fragment);
1300         }
1301       }
1302     }
1303     PendingInlineFuncDecls.clear();
1304   }
1305 
1306   // C99 6.9.2p2:
1307   //   A declaration of an identifier for an object that has file
1308   //   scope without an initializer, and without a storage-class
1309   //   specifier or with the storage-class specifier static,
1310   //   constitutes a tentative definition. If a translation unit
1311   //   contains one or more tentative definitions for an identifier,
1312   //   and the translation unit contains no external definition for
1313   //   that identifier, then the behavior is exactly as if the
1314   //   translation unit contains a file scope declaration of that
1315   //   identifier, with the composite type as of the end of the
1316   //   translation unit, with an initializer equal to 0.
1317   llvm::SmallSet<VarDecl *, 32> Seen;
1318   for (TentativeDefinitionsType::iterator
1319            T = TentativeDefinitions.begin(ExternalSource.get()),
1320            TEnd = TentativeDefinitions.end();
1321        T != TEnd; ++T) {
1322     VarDecl *VD = (*T)->getActingDefinition();
1323 
1324     // If the tentative definition was completed, getActingDefinition() returns
1325     // null. If we've already seen this variable before, insert()'s second
1326     // return value is false.
1327     if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
1328       continue;
1329 
1330     if (const IncompleteArrayType *ArrayT
1331         = Context.getAsIncompleteArrayType(VD->getType())) {
1332       // Set the length of the array to 1 (C99 6.9.2p5).
1333       Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
1334       llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
1335       QualType T = Context.getConstantArrayType(
1336           ArrayT->getElementType(), One, nullptr, ArraySizeModifier::Normal, 0);
1337       VD->setType(T);
1338     } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
1339                                    diag::err_tentative_def_incomplete_type))
1340       VD->setInvalidDecl();
1341 
1342     // No initialization is performed for a tentative definition.
1343     CheckCompleteVariableDeclaration(VD);
1344 
1345     // Notify the consumer that we've completed a tentative definition.
1346     if (!VD->isInvalidDecl())
1347       Consumer.CompleteTentativeDefinition(VD);
1348   }
1349 
1350   for (auto *D : ExternalDeclarations) {
1351     if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
1352       continue;
1353 
1354     Consumer.CompleteExternalDeclaration(D);
1355   }
1356 
1357   // If there were errors, disable 'unused' warnings since they will mostly be
1358   // noise. Don't warn for a use from a module: either we should warn on all
1359   // file-scope declarations in modules or not at all, but whether the
1360   // declaration is used is immaterial.
1361   if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
1362     // Output warning for unused file scoped decls.
1363     for (UnusedFileScopedDeclsType::iterator
1364              I = UnusedFileScopedDecls.begin(ExternalSource.get()),
1365              E = UnusedFileScopedDecls.end();
1366          I != E; ++I) {
1367       if (ShouldRemoveFromUnused(this, *I))
1368         continue;
1369 
1370       if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
1371         const FunctionDecl *DiagD;
1372         if (!FD->hasBody(DiagD))
1373           DiagD = FD;
1374         if (DiagD->isDeleted())
1375           continue; // Deleted functions are supposed to be unused.
1376         SourceRange DiagRange = DiagD->getLocation();
1377         if (const ASTTemplateArgumentListInfo *ASTTAL =
1378                 DiagD->getTemplateSpecializationArgsAsWritten())
1379           DiagRange.setEnd(ASTTAL->RAngleLoc);
1380         if (DiagD->isReferenced()) {
1381           if (isa<CXXMethodDecl>(DiagD))
1382             Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
1383                 << DiagD << DiagRange;
1384           else {
1385             if (FD->getStorageClass() == SC_Static &&
1386                 !FD->isInlineSpecified() &&
1387                 !SourceMgr.isInMainFile(
1388                    SourceMgr.getExpansionLoc(FD->getLocation())))
1389               Diag(DiagD->getLocation(),
1390                    diag::warn_unneeded_static_internal_decl)
1391                   << DiagD << DiagRange;
1392             else
1393               Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1394                   << /*function=*/0 << DiagD << DiagRange;
1395           }
1396         } else {
1397           if (FD->getDescribedFunctionTemplate())
1398             Diag(DiagD->getLocation(), diag::warn_unused_template)
1399                 << /*function=*/0 << DiagD << DiagRange;
1400           else
1401             Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
1402                                            ? diag::warn_unused_member_function
1403                                            : diag::warn_unused_function)
1404                 << DiagD << DiagRange;
1405         }
1406       } else {
1407         const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
1408         if (!DiagD)
1409           DiagD = cast<VarDecl>(*I);
1410         SourceRange DiagRange = DiagD->getLocation();
1411         if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
1412           if (const ASTTemplateArgumentListInfo *ASTTAL =
1413                   VTSD->getTemplateArgsInfo())
1414             DiagRange.setEnd(ASTTAL->RAngleLoc);
1415         }
1416         if (DiagD->isReferenced()) {
1417           Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1418               << /*variable=*/1 << DiagD << DiagRange;
1419         } else if (DiagD->getDescribedVarTemplate()) {
1420           Diag(DiagD->getLocation(), diag::warn_unused_template)
1421               << /*variable=*/1 << DiagD << DiagRange;
1422         } else if (DiagD->getType().isConstQualified()) {
1423           const SourceManager &SM = SourceMgr;
1424           if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
1425               !PP.getLangOpts().IsHeaderFile)
1426             Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
1427                 << DiagD << DiagRange;
1428         } else {
1429           Diag(DiagD->getLocation(), diag::warn_unused_variable)
1430               << DiagD << DiagRange;
1431         }
1432       }
1433     }
1434 
1435     emitAndClearUnusedLocalTypedefWarnings();
1436   }
1437 
1438   if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
1439     // FIXME: Load additional unused private field candidates from the external
1440     // source.
1441     RecordCompleteMap RecordsComplete;
1442     RecordCompleteMap MNCComplete;
1443     for (const NamedDecl *D : UnusedPrivateFields) {
1444       const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
1445       if (RD && !RD->isUnion() &&
1446           IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
1447         Diag(D->getLocation(), diag::warn_unused_private_field)
1448               << D->getDeclName();
1449       }
1450     }
1451   }
1452 
1453   if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
1454     if (ExternalSource)
1455       ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
1456     for (const auto &DeletedFieldInfo : DeleteExprs) {
1457       for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
1458         AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
1459                                   DeleteExprLoc.second);
1460       }
1461     }
1462   }
1463 
1464   AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl());
1465 
1466   // Check we've noticed that we're no longer parsing the initializer for every
1467   // variable. If we miss cases, then at best we have a performance issue and
1468   // at worst a rejects-valid bug.
1469   assert(ParsingInitForAutoVars.empty() &&
1470          "Didn't unmark var as having its initializer parsed");
1471 
1472   if (!PP.isIncrementalProcessingEnabled())
1473     TUScope = nullptr;
1474 }
1475 
1476 
1477 //===----------------------------------------------------------------------===//
1478 // Helper functions.
1479 //===----------------------------------------------------------------------===//
1480 
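     // Walk up the DeclContext chain to the nearest "function level" context,
     // skipping contexts that are transparent for this purpose (blocks, enums,
     // captured regions, and requires-expression bodies). When AllowLambda is
     // false, the call operator of a lambda is skipped as well, so callers see
     // the function that lexically encloses the lambda.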
1481 DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1482   DeclContext *DC = CurContext;
1483 
1484   while (true) {
1485     if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
1486         isa<RequiresExprBodyDecl>(DC)) {
1487       DC = DC->getParent();
1488     } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
1489                cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
1490                cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
1491       DC = DC->getParent()->getParent();
1492     } else break;
1493   }
1494 
1495   return DC;
1496 }
1497 
1498 /// getCurFunctionDecl - If inside of a function body, this returns a pointer
1499 /// to the function decl for the function being parsed.  If we're currently
1500 /// in a 'block', this returns the containing context.
1501 FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1502   DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1503   return dyn_cast<FunctionDecl>(DC);
1504 }
1505 
1506 ObjCMethodDecl *Sema::getCurMethodDecl() {
1507   DeclContext *DC = getFunctionLevelDeclContext();
1508   while (isa<RecordDecl>(DC))
1509     DC = DC->getParent();
1510   return dyn_cast<ObjCMethodDecl>(DC);
1511 }
1512 
1513 NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1514   DeclContext *DC = getFunctionLevelDeclContext();
1515   if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
1516     return cast<NamedDecl>(DC);
1517   return nullptr;
1518 }
1519 
1520 LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1521   if (getLangOpts().OpenCL)
1522     return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1523   return LangAS::Default;
1524 }
1525 
1526 void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
1527   // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
1528   // and yet we also use the current diag ID on the DiagnosticsEngine. This has
1529   // been made more painfully obvious by the refactor that introduced this
1530   // function, but it is possible that the incoming argument can be
1531   // eliminated. If it truly cannot be (for example, there is some reentrancy
1532   // issue I am not seeing yet), then there should at least be a clarifying
1533   // comment somewhere.
1534   if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
1535     switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
1536               Diags.getCurrentDiagID())) {
1537     case DiagnosticIDs::SFINAE_Report:
1538       // We'll report the diagnostic below.
1539       break;
1540 
1541     case DiagnosticIDs::SFINAE_SubstitutionFailure:
1542       // Count this failure so that we know that template argument deduction
1543       // has failed.
1544       ++NumSFINAEErrors;
1545 
1546       // Make a copy of this suppressed diagnostic and store it with the
1547       // template-deduction information.
1548       if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1549         Diagnostic DiagInfo(&Diags);
1550         (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1551                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1552       }
1553 
1554       Diags.setLastDiagnosticIgnored(true);
1555       Diags.Clear();
1556       return;
1557 
1558     case DiagnosticIDs::SFINAE_AccessControl: {
1559       // Per C++ Core Issue 1170, access control is part of SFINAE.
1560       // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
1561       // make access control a part of SFINAE for the purposes of checking
1562       // type traits.
1563       if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
1564         break;
1565 
1566       SourceLocation Loc = Diags.getCurrentDiagLoc();
1567 
1568       // Suppress this diagnostic.
1569       ++NumSFINAEErrors;
1570 
1571       // Make a copy of this suppressed diagnostic and store it with the
1572       // template-deduction information.
1573       if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1574         Diagnostic DiagInfo(&Diags);
1575         (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1576                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1577       }
1578 
1579       Diags.setLastDiagnosticIgnored(true);
1580       Diags.Clear();
1581 
1582       // Now the diagnostic state is clear, produce a C++98 compatibility
1583       // warning.
1584       Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
1585 
1586       // The last diagnostic which Sema produced was ignored. Suppress any
1587       // notes attached to it.
1588       Diags.setLastDiagnosticIgnored(true);
1589       return;
1590     }
1591 
1592     case DiagnosticIDs::SFINAE_Suppress:
1593       // Make a copy of this suppressed diagnostic and store it with the
1594       // template-deduction information.
1595       if (*Info) {
1596         Diagnostic DiagInfo(&Diags);
1597         (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
1598                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1599       }
1600 
1601       // Suppress this diagnostic.
1602       Diags.setLastDiagnosticIgnored(true);
1603       Diags.Clear();
1604       return;
1605     }
1606   }
1607 
1608   // Copy the diagnostic printing policy over the ASTContext printing policy.
1609   // TODO: Stop doing that.  See: https://reviews.llvm.org/D45093#1090292
1610   Context.setPrintingPolicy(getPrintingPolicy());
1611 
1612   // Emit the diagnostic.
1613   if (!Diags.EmitCurrentDiagnostic())
1614     return;
1615 
1616   // If this is not a note, and we're in a template instantiation
1617   // that is different from the last template instantiation where
1618   // we emitted an error, print a template instantiation
1619   // backtrace.
1620   if (!DiagnosticIDs::isBuiltinNote(DiagID))
1621     PrintContextStack();
1622 }
1623 
1624 Sema::SemaDiagnosticBuilder
1625 Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
1626   return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
1627 }
1628 
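     // An uncompilable error has occurred either if the diagnostics engine has
     // recorded one, or if a deferred device diagnostic attached to the
     // current function is mapped to an error by default (it will be emitted
     // once the function is known to be emitted).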
1629 bool Sema::hasUncompilableErrorOccurred() const {
1630   if (getDiagnostics().hasUncompilableErrorOccurred())
1631     return true;
1632   auto *FD = dyn_cast<FunctionDecl>(CurContext);
1633   if (!FD)
1634     return false;
1635   auto Loc = DeviceDeferredDiags.find(FD);
1636   if (Loc == DeviceDeferredDiags.end())
1637     return false;
1638   for (auto PDAt : Loc->second) {
1639     if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
1640       return true;
1641   }
1642   return false;
1643 }
1644 
1645 // Print notes showing how we can reach FD starting from an a priori
1646 // known-emitted function.
1647 static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1648   auto FnIt = S.DeviceKnownEmittedFns.find(FD);
1649   while (FnIt != S.DeviceKnownEmittedFns.end()) {
1650     // Respect error limit.
1651     if (S.Diags.hasFatalErrorOccurred())
1652       return;
1653     DiagnosticBuilder Builder(
1654         S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1655     Builder << FnIt->second.FD;
1656     FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
1657   }
1658 }
1659 
1660 namespace {
1661 
1662 /// Helper class that emits deferred diagnostic messages if an entity that
1663 /// directly or indirectly uses the function causing the deferred diagnostic
1664 /// messages is known to be emitted.
1665 ///
1666 /// While the AST is parsed, certain diagnostic messages are recorded as
1667 /// deferred diagnostics because it is not yet known whether the functions
1668 /// containing them will be emitted. A list of potentially emitted functions,
1669 /// and of variables whose initializers may trigger emission of functions, is
1670 /// also recorded. DeferredDiagnosticsEmitter recursively visits the functions
1671 /// used by each such function in order to emit the deferred diagnostics.
1672 ///
1673 /// During the visit, certain OpenMP directives, or the initializers of
1674 /// variables with certain OpenMP attributes, cause any subsequently visited
1675 /// functions to enter a state that this implementation calls the OpenMP
1676 /// device context. The state is exited when the directive or initializer is
1677 /// exited. This state can change the emission state of subsequent uses of
1678 /// functions.
1679 ///
1680 /// Conceptually, the functions or variables to be visited form a use graph
1681 /// in which a parent node uses its child nodes. At any point in the visit,
1682 /// the nodes traversed from the root to the current node form a use stack.
1683 /// The emission state of the current node depends on two factors:
1684 ///    1. the emission state of the root node
1685 ///    2. whether the current node is in an OpenMP device context
1686 /// If a function is determined to be emitted, the deferred diagnostics it
1687 /// contains are emitted, together with information about the use stack.
1688 ///
1689 class DeferredDiagnosticsEmitter
1690     : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1691 public:
1692   typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1693 
1694   // Whether the function is already in the current use-path.
1695   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1696 
1697   // The current use-path.
1698   llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1699 
1700   // Whether a function has already been fully visited. DoneMap[0] is for
1701   // visits outside an OpenMP device context and DoneMap[1] for visits inside
1702   // one. Two sets are needed because diagnostic emission may differ
1703   // depending on whether we are in an OpenMP device context.
1704   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1705 
1706   // Emission state of the root node of the current use graph.
1707   bool ShouldEmitRootNode;
1708 
1709   // Current OpenMP device context nesting level. It starts at 0; entering a
1710   // device context increments it and leaving one decrements it. A non-zero
1711   // value indicates that we are currently inside a device context.
1712   unsigned InOMPDeviceContext;
1713 
1714   DeferredDiagnosticsEmitter(Sema &S)
1715       : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
1716 
1717   bool shouldVisitDiscardedStmt() const { return false; }
1718 
1719   void VisitOMPTargetDirective(OMPTargetDirective *Node) {
1720     ++InOMPDeviceContext;
1721     Inherited::VisitOMPTargetDirective(Node);
1722     --InOMPDeviceContext;
1723   }
1724 
1725   void visitUsedDecl(SourceLocation Loc, Decl *D) {
1726     if (isa<VarDecl>(D))
1727       return;
1728     if (auto *FD = dyn_cast<FunctionDecl>(D))
1729       checkFunc(Loc, FD);
1730     else
1731       Inherited::visitUsedDecl(Loc, D);
1732   }
1733 
1734   void checkVar(VarDecl *VD) {
1735     assert(VD->isFileVarDecl() &&
1736            "Should only check file-scope variables");
1737     if (auto *Init = VD->getInit()) {
1738       auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
1739       bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
1740                              *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
1741       if (IsDev)
1742         ++InOMPDeviceContext;
1743       this->Visit(Init);
1744       if (IsDev)
1745         --InOMPDeviceContext;
1746     }
1747   }
1748 
1749   void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
1750     auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
1751     FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
1752     if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
1753         S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
1754       return;
1755     // Finalize analysis of OpenMP-specific constructs.
1756     if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
1757         (ShouldEmitRootNode || InOMPDeviceContext))
1758       S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
1759     if (Caller)
1760       S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
1761     // Always emit deferred diagnostics for the direct users. This does not
1762     // lead to an explosion of diagnostics since each user is visited at most
1763     // twice.
1764     if (ShouldEmitRootNode || InOMPDeviceContext)
1765       emitDeferredDiags(FD, Caller);
1766     // Do not revisit a function if the function body has been completely
1767     // visited before.
1768     if (!Done.insert(FD).second)
1769       return;
1770     InUsePath.insert(FD);
1771     UsePath.push_back(FD);
1772     if (auto *S = FD->getBody()) {
1773       this->Visit(S);
1774     }
1775     UsePath.pop_back();
1776     InUsePath.erase(FD);
1777   }
1778 
1779   void checkRecordedDecl(Decl *D) {
1780     if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1781       ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
1782                            Sema::FunctionEmissionStatus::Emitted;
1783       checkFunc(SourceLocation(), FD);
1784     } else
1785       checkVar(cast<VarDecl>(D));
1786   }
1787 
1788   // Emit any deferred diagnostics for FD
1789   void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
1790     auto It = S.DeviceDeferredDiags.find(FD);
1791     if (It == S.DeviceDeferredDiags.end())
1792       return;
1793     bool HasWarningOrError = false;
1794     bool FirstDiag = true;
1795     for (PartialDiagnosticAt &PDAt : It->second) {
1796       // Respect error limit.
1797       if (S.Diags.hasFatalErrorOccurred())
1798         return;
1799       const SourceLocation &Loc = PDAt.first;
1800       const PartialDiagnostic &PD = PDAt.second;
1801       HasWarningOrError |=
1802           S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
1803           DiagnosticsEngine::Warning;
1804       {
1805         DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
1806         PD.Emit(Builder);
1807       }
1808       // Emit the note on the first diagnostic, in case too many diagnostics
1809       // would otherwise cause the note not to be emitted.
1810       if (FirstDiag && HasWarningOrError && ShowCallStack) {
1811         emitCallStackNotes(S, FD);
1812         FirstDiag = false;
1813       }
1814     }
1815   }
1816 };
1817 } // namespace
1818 
1819 void Sema::emitDeferredDiags() {
1820   if (ExternalSource)
1821     ExternalSource->ReadDeclsToCheckForDeferredDiags(
1822         DeclsToCheckForDeferredDiags);
1823 
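       // Nothing to do unless there are deferred device diagnostics (or OpenMP
       // delayed analyses to finalize) and declarations that might be emitted.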
1824   if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
1825       DeclsToCheckForDeferredDiags.empty())
1826     return;
1827 
1828   DeferredDiagnosticsEmitter DDE(*this);
1829   for (auto *D : DeclsToCheckForDeferredDiags)
1830     DDE.checkRecordedDecl(D);
1831 }
1832 
1833 // In CUDA, there are some constructs which may appear in semantically-valid
1834 // code, but trigger errors if we ever generate code for the function in which
1835 // they appear.  Essentially every construct you're not allowed to use on the
1836 // device falls into this category, because you are allowed to use these
1837 // constructs in a __host__ __device__ function, but only if that function is
1838 // never codegen'ed on the device.
1839 //
1840 // To handle semantic checking for these constructs, we keep track of the set of
1841 // functions we know will be emitted, either because we could tell a priori that
1842 // they would be emitted, or because they were transitively called by a
1843 // known-emitted function.
1844 //
1845 // We also keep a partial call graph of which not-known-emitted functions call
1846 // which other not-known-emitted functions.
1847 //
1848 // When we see something which is illegal if the current function is emitted
1849 // (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
1850 // CheckCUDACall), we first check if the current function is known-emitted.  If
1851 // so, we immediately output the diagnostic.
1852 //
1853 // Otherwise, we "defer" the diagnostic.  It sits in Sema::DeviceDeferredDiags
1854 // until we discover that the function is known-emitted, at which point we take
1855 // it out of this map and emit the diagnostic.
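     //
     // For example (an illustrative sketch, not code from this file): 'throw'
     // is not allowed in device code, but may appear in a __host__ __device__
     // function as long as that function is never codegen'ed for the device:
     //
     //   __host__ __device__ void hd() { throw 0; } // diagnostic is deferred
     //   __global__ void kernel() { hd(); }         // hd() is now known to be
     //                                              // emitted on the device,
     //                                              // so the deferred error
     //                                              // is reported.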
1856 
1857 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
1858                                                    unsigned DiagID,
1859                                                    const FunctionDecl *Fn,
1860                                                    Sema &S)
1861     : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
1862       ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
1863   switch (K) {
1864   case K_Nop:
1865     break;
1866   case K_Immediate:
1867   case K_ImmediateWithCallStack:
1868     ImmediateDiag.emplace(
1869         ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
1870     break;
1871   case K_Deferred:
1872     assert(Fn && "Must have a function to attach the deferred diag to.");
1873     auto &Diags = S.DeviceDeferredDiags[Fn];
1874     PartialDiagId.emplace(Diags.size());
1875     Diags.emplace_back(Loc, S.PDiag(DiagID));
1876     break;
1877   }
1878 }
1879 
1880 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
1881     : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
1882       ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
1883       PartialDiagId(D.PartialDiagId) {
1884   // Clean the previous diagnostics.
1885   D.ShowCallStack = false;
1886   D.ImmediateDiag.reset();
1887   D.PartialDiagId.reset();
1888 }
1889 
1890 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
1891   if (ImmediateDiag) {
1892     // Emit our diagnostic and, if it was a warning or error, output a callstack
1893     // if Fn isn't a priori known-emitted.
1894     bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
1895                                 DiagID, Loc) >= DiagnosticsEngine::Warning;
1896     ImmediateDiag.reset(); // Emit the immediate diag.
1897     if (IsWarningOrError && ShowCallStack)
1898       emitCallStackNotes(S, Fn);
1899   } else {
1900     assert((!PartialDiagId || ShowCallStack) &&
1901            "Must always show call stack for deferred diags.");
1902   }
1903 }
1904 
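     // Produce a diagnostic for code that is only problematic when compiling
     // for a particular offloading target. Depending on the active offloading
     // model (OpenMP target, CUDA/HIP, or SYCL), the diagnostic is either
     // emitted immediately or deferred until the enclosing function is known
     // to be emitted for the current device or host side.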
1905 Sema::SemaDiagnosticBuilder
1906 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
1907   FD = FD ? FD : getCurFunctionDecl();
1908   if (LangOpts.OpenMP)
1909     return LangOpts.OpenMPIsTargetDevice
1910                ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1911                : diagIfOpenMPHostCode(Loc, DiagID, FD);
1912   if (getLangOpts().CUDA)
1913     return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
1914                                       : CUDADiagIfHostCode(Loc, DiagID);
1915 
1916   if (getLangOpts().SYCLIsDevice)
1917     return SYCLDiagIfDeviceCode(Loc, DiagID);
1918 
1919   return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1920                                FD, *this);
1921 }
1922 
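     // When GPU diagnostic deferral is enabled (LangOpts.GPUDeferDiag),
     // deferrable CUDA/HIP diagnostics are routed through CUDADiagIfDeviceCode
     // or CUDADiagIfHostCode so they are only reported for code that is
     // actually emitted for the current compilation side. Non-error
     // diagnostics are always eligible; errors are deferred only when deferral
     // is explicitly requested.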
1923 Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
1924                                        bool DeferHint) {
1925   bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
1926   bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
1927                      DiagnosticIDs::isDeferrable(DiagID) &&
1928                      (DeferHint || DeferDiags || !IsError);
1929   auto SetIsLastErrorImmediate = [&](bool Flag) {
1930     if (IsError)
1931       IsLastErrorImmediate = Flag;
1932   };
1933   if (!ShouldDefer) {
1934     SetIsLastErrorImmediate(true);
1935     return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
1936                                  DiagID, getCurFunctionDecl(), *this);
1937   }
1938 
1939   SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
1940                                  ? CUDADiagIfDeviceCode(Loc, DiagID)
1941                                  : CUDADiagIfHostCode(Loc, DiagID);
1942   SetIsLastErrorImmediate(DB.isImmediate());
1943   return DB;
1944 }
1945 
1946 void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
1947   if (isUnevaluatedContext() || Ty.isNull())
1948     return;
1949 
1950   // The original idea behind the checkTypeSupport function is that unused
1951   // declarations can be replaced with an array of bytes of the same size
1952   // during codegen. Such a replacement does not seem possible for types
1953   // without a constant byte size, like zero-length arrays, so do a deep check for SYCL.
1954   if (D && LangOpts.SYCLIsDevice) {
1955     llvm::DenseSet<QualType> Visited;
1956     deepTypeCheckForSYCLDevice(Loc, Visited, D);
1957   }
1958 
1959   Decl *C = cast<Decl>(getCurLexicalContext());
1960 
1961   // Memcpy operations for structs containing a member with an unsupported
1962   // type are OK, though.
1963   if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
1964     if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
1965         MD->isTrivial())
1966       return;
1967 
1968     if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
1969       if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
1970         return;
1971   }
1972 
1973   // Try to associate errors with the lexical context, if that is a function, or
1974   // the value declaration otherwise.
1975   const FunctionDecl *FD = isa<FunctionDecl>(C)
1976                                ? cast<FunctionDecl>(C)
1977                                : dyn_cast_or_null<FunctionDecl>(D);
1978 
1979   auto CheckDeviceType = [&](QualType Ty) {
1980     if (Ty->isDependentType())
1981       return;
1982 
1983     if (Ty->isBitIntType()) {
1984       if (!Context.getTargetInfo().hasBitIntType()) {
1985         PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
1986         if (D)
1987           PD << D;
1988         else
1989           PD << "expression";
1990         targetDiag(Loc, PD, FD)
1991             << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
1992             << Ty << Context.getTargetInfo().getTriple().str();
1993       }
1994       return;
1995     }
1996 
1997     // Check if we are dealing with two 'long double' but with different
1998     // semantics.
1999     bool LongDoubleMismatched = false;
2000     if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
2001       const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
2002       if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
2003            !Context.getTargetInfo().hasFloat128Type()) ||
2004           (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
2005            !Context.getTargetInfo().hasIbm128Type()))
2006         LongDoubleMismatched = true;
2007     }
2008 
2009     if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
2010         (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
2011         (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
2012         (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
2013          !Context.getTargetInfo().hasInt128Type()) ||
2014         (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
2015          !LangOpts.CUDAIsDevice) ||
2016         LongDoubleMismatched) {
2017       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2018       if (D)
2019         PD << D;
2020       else
2021         PD << "expression";
2022 
2023       if (targetDiag(Loc, PD, FD)
2024           << true /*show bit size*/
2025           << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
2026           << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
2027         if (D)
2028           D->setInvalidDecl();
2029       }
2030       if (D)
2031         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2032     }
2033   };
2034 
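       // In addition to the device-only checks above, check properties that
       // apply to any compilation: targets without 'long double', targets that
       // cannot return 'float'/'double' directly, RISC-V vector (RVV) types,
       // and SVE types used in functions lacking the SVE/SME target features.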
2035   auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
2036     if (LangOpts.SYCLIsDevice ||
2037         (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
2038         LangOpts.CUDAIsDevice)
2039       CheckDeviceType(Ty);
2040 
2041     QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
2042     const TargetInfo &TI = Context.getTargetInfo();
2043     if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
2044       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2045       if (D)
2046         PD << D;
2047       else
2048         PD << "expression";
2049 
2050       if (Diag(Loc, PD, FD)
2051           << false /*show bit size*/ << 0 << Ty << false /*return*/
2052           << TI.getTriple().str()) {
2053         if (D)
2054           D->setInvalidDecl();
2055       }
2056       if (D)
2057         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2058     }
2059 
2060     bool IsDouble = UnqualTy == Context.DoubleTy;
2061     bool IsFloat = UnqualTy == Context.FloatTy;
2062     if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
2063       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2064       if (D)
2065         PD << D;
2066       else
2067         PD << "expression";
2068 
2069       if (Diag(Loc, PD, FD)
2070           << false /*show bit size*/ << 0 << Ty << true /*return*/
2071           << TI.getTriple().str()) {
2072         if (D)
2073           D->setInvalidDecl();
2074       }
2075       if (D)
2076         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2077     }
2078 
2079     if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType())
2080       checkRVVTypeSupport(Ty, Loc, D);
2081 
2082     // Don't allow SVE types in functions without an SVE target.
2083     if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
2084       llvm::StringMap<bool> CallerFeatureMap;
2085       Context.getFunctionFeatureMap(CallerFeatureMap, FD);
2086       if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap) &&
2087           !Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
2088         Diag(D->getLocation(), diag::err_sve_vector_in_non_sve_target) << Ty;
2089     }
2090   };
2091 
2092   CheckType(Ty);
2093   if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
2094     for (const auto &ParamTy : FPTy->param_types())
2095       CheckType(ParamTy);
2096     CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
2097   }
2098   if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
2099     CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
2100 }
2101 
2102 /// Looks through the macro-expansion chain for the given
2103 /// location, looking for a macro expansion with the given name.
2104 /// If one is found, returns true and sets the location to that
2105 /// expansion loc.
2106 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2107   SourceLocation loc = locref;
2108   if (!loc.isMacroID()) return false;
2109 
2110   // There's no good way right now to look at the intermediate
2111   // expansions, so just jump to the expansion location.
2112   loc = getSourceManager().getExpansionLoc(loc);
2113 
2114   // If that's written with the name, stop here.
2115   SmallString<16> buffer;
2116   if (getPreprocessor().getSpelling(loc, buffer) == name) {
2117     locref = loc;
2118     return true;
2119   }
2120   return false;
2121 }
2122 
2123 /// Determines the active Scope associated with the given declaration
2124 /// context.
2125 ///
2126 /// This routine maps a declaration context to the active Scope object that
2127 /// represents that declaration context in the parser. It is typically used
2128 /// from "scope-less" code (e.g., template instantiation, lazy creation of
2129 /// declarations) that injects a name for name-lookup purposes and, therefore,
2130 /// must update the Scope.
2131 ///
2132 /// \returns The scope corresponding to the given declaration context, or NULL
2133 /// if no such scope is open.
2134 Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2135 
2136   if (!Ctx)
2137     return nullptr;
2138 
2139   Ctx = Ctx->getPrimaryContext();
2140   for (Scope *S = getCurScope(); S; S = S->getParent()) {
2141     // Ignore scopes that cannot have declarations. This is important for
2142     // out-of-line definitions of static class members.
2143     if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2144       if (DeclContext *Entity = S->getEntity())
2145         if (Ctx == Entity->getPrimaryContext())
2146           return S;
2147   }
2148 
2149   return nullptr;
2150 }
2151 
2152 /// Enter a new function scope
2153 void Sema::PushFunctionScope() {
2154   if (FunctionScopes.empty() && CachedFunctionScope) {
2155     // Use CachedFunctionScope to avoid allocating memory when possible.
2156     CachedFunctionScope->Clear();
2157     FunctionScopes.push_back(CachedFunctionScope.release());
2158   } else {
2159     FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
2160   }
2161   if (LangOpts.OpenMP)
2162     pushOpenMPFunctionRegion();
2163 }
2164 
2165 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2166   FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2167                                               BlockScope, Block));
2168   CapturingFunctionScopes++;
2169 }
2170 
2171 LambdaScopeInfo *Sema::PushLambdaScope() {
2172   LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2173   FunctionScopes.push_back(LSI);
2174   CapturingFunctionScopes++;
2175   return LSI;
2176 }
2177 
2178 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2179   if (LambdaScopeInfo *const LSI = getCurLambda()) {
2180     LSI->AutoTemplateParameterDepth = Depth;
2181     return;
2182   }
2183   llvm_unreachable(
2184       "Remove assertion if intentionally called in a non-lambda context.");
2185 }
2186 
2187 // Check that the type of the VarDecl has an accessible copy constructor and
2188 // resolve its destructor's exception specification.
2189 // This also performs initialization of block variables when they are moved
2190 // to the heap. It uses the same rules as applicable for implicit moves
2191 // according to the C++ standard in effect ([class.copy.elision]p3).
2192 static void checkEscapingByref(VarDecl *VD, Sema &S) {
2193   QualType T = VD->getType();
2194   EnterExpressionEvaluationContext scope(
2195       S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
2196   SourceLocation Loc = VD->getLocation();
2197   Expr *VarRef =
2198       new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
2199   ExprResult Result;
2200   auto IE = InitializedEntity::InitializeBlock(Loc, T);
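       // In C++23 mode the captured variable is treated as an xvalue, so a
       // move constructor can be chosen directly; in earlier modes the
       // implicit-move rules for named return values are applied instead.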
2201   if (S.getLangOpts().CPlusPlus23) {
2202     auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
2203                                        VK_XValue, FPOptionsOverride());
2204     Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
2205   } else {
2206     Result = S.PerformMoveOrCopyInitialization(
2207         IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
2208         VarRef);
2209   }
2210 
2211   if (!Result.isInvalid()) {
2212     Result = S.MaybeCreateExprWithCleanups(Result);
2213     Expr *Init = Result.getAs<Expr>();
2214     S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
2215   }
2216 
2217   // The destructor's exception specification is needed when IRGen generates
2218   // block copy/destroy functions. Resolve it here.
2219   if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
2220     if (CXXDestructorDecl *DD = RD->getDestructor()) {
2221       auto *FPT = DD->getType()->getAs<FunctionProtoType>();
2222       S.ResolveExceptionSpec(Loc, FPT);
2223     }
2224 }
2225 
2226 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2227   // Set the EscapingByref flag of __block variables captured by
2228   // escaping blocks.
2229   for (const BlockDecl *BD : FSI.Blocks) {
2230     for (const BlockDecl::Capture &BC : BD->captures()) {
2231       VarDecl *VD = BC.getVariable();
2232       if (VD->hasAttr<BlocksAttr>()) {
2233         // Nothing to do if this is a __block variable captured by a
2234         // non-escaping block.
2235         if (BD->doesNotEscape())
2236           continue;
2237         VD->setEscapingByref();
2238       }
2239       // Check whether the captured variable is or contains an object of
2240       // non-trivial C union type.
2241       QualType CapType = BC.getVariable()->getType();
2242       if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2243           CapType.hasNonTrivialToPrimitiveCopyCUnion())
2244         S.checkNonTrivialCUnion(BC.getVariable()->getType(),
2245                                 BD->getCaretLocation(),
2246                                 Sema::NTCUC_BlockCapture,
2247                                 Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
2248     }
2249   }
2250 
2251   for (VarDecl *VD : FSI.ByrefBlockVars) {
2252     // __block variables might require us to capture a copy-initializer.
2253     if (!VD->isEscapingByref())
2254       continue;
2255     // It's currently invalid to ever have a __block variable with an
2256     // array type; should we diagnose that here?
2257     // Regardless, we don't want to ignore array nesting when
2258     // constructing this copy.
2259     if (VD->getType()->isStructureOrClassType())
2260       checkEscapingByref(VD, S);
2261   }
2262 }
2263 
2264 /// Pop a function (or block or lambda or captured region) scope from the stack.
2265 ///
2266 /// \param WP The warning policy to use for CFG-based warnings, or null if such
2267 ///        warnings should not be produced.
2268 /// \param D The declaration corresponding to this function scope, if producing
2269 ///        CFG-based warnings.
2270 /// \param BlockType The type of the block expression, if D is a BlockDecl.
2271 Sema::PoppedFunctionScopePtr
2272 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
2273                            const Decl *D, QualType BlockType) {
2274   assert(!FunctionScopes.empty() && "mismatched push/pop!");
2275 
2276   markEscapingByrefs(*FunctionScopes.back(), *this);
2277 
2278   PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
2279                                PoppedFunctionScopeDeleter(this));
2280 
2281   if (LangOpts.OpenMP)
2282     popOpenMPFunctionRegion(Scope.get());
2283 
2284   // Issue any analysis-based warnings.
2285   if (WP && D)
2286     AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
2287   else
2288     for (const auto &PUD : Scope->PossiblyUnreachableDiags)
2289       Diag(PUD.Loc, PUD.PD);
2290 
2291   return Scope;
2292 }
2293 
2294 void Sema::PoppedFunctionScopeDeleter::
2295 operator()(sema::FunctionScopeInfo *Scope) const {
2296   if (!Scope->isPlainFunction())
2297     Self->CapturingFunctionScopes--;
2298   // Stash the function scope for later reuse if it's for a normal function.
2299   if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2300     Self->CachedFunctionScope.reset(Scope);
2301   else
2302     delete Scope;
2303 }
2304 
2305 void Sema::PushCompoundScope(bool IsStmtExpr) {
2306   getCurFunction()->CompoundScopes.push_back(
2307       CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2308 }
2309 
2310 void Sema::PopCompoundScope() {
2311   FunctionScopeInfo *CurFunction = getCurFunction();
2312   assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2313 
2314   CurFunction->CompoundScopes.pop_back();
2315 }
2316 
2317 /// Determine whether any errors occurred within this function/method/
2318 /// block.
2319 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2320   return getCurFunction()->hasUnrecoverableErrorOccurred();
2321 }
2322 
2323 void Sema::setFunctionHasBranchIntoScope() {
2324   if (!FunctionScopes.empty())
2325     FunctionScopes.back()->setHasBranchIntoScope();
2326 }
2327 
2328 void Sema::setFunctionHasBranchProtectedScope() {
2329   if (!FunctionScopes.empty())
2330     FunctionScopes.back()->setHasBranchProtectedScope();
2331 }
2332 
2333 void Sema::setFunctionHasIndirectGoto() {
2334   if (!FunctionScopes.empty())
2335     FunctionScopes.back()->setHasIndirectGoto();
2336 }
2337 
2338 void Sema::setFunctionHasMustTail() {
2339   if (!FunctionScopes.empty())
2340     FunctionScopes.back()->setHasMustTail();
2341 }
2342 
2343 BlockScopeInfo *Sema::getCurBlock() {
2344   if (FunctionScopes.empty())
2345     return nullptr;
2346 
2347   auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
2348   if (CurBSI && CurBSI->TheDecl &&
2349       !CurBSI->TheDecl->Encloses(CurContext)) {
2350     // We have switched contexts due to template instantiation.
2351     assert(!CodeSynthesisContexts.empty());
2352     return nullptr;
2353   }
2354 
2355   return CurBSI;
2356 }
2357 
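     // Return the innermost enclosing function-like scope, skipping any block
     // scopes along the way.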
2358 FunctionScopeInfo *Sema::getEnclosingFunction() const {
2359   if (FunctionScopes.empty())
2360     return nullptr;
2361 
2362   for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2363     if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
2364       continue;
2365     return FunctionScopes[e];
2366   }
2367   return nullptr;
2368 }
2369 
2370 LambdaScopeInfo *Sema::getEnclosingLambda() const {
2371   for (auto *Scope : llvm::reverse(FunctionScopes)) {
2372     if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
2373       if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
2374           LSI->AfterParameterList) {
2375         // We have switched contexts due to template instantiation.
2376         // FIXME: We should swap out the FunctionScopes during code synthesis
2377         // so that we don't need to check for this.
2378         assert(!CodeSynthesisContexts.empty());
2379         return nullptr;
2380       }
2381       return LSI;
2382     }
2383   }
2384   return nullptr;
2385 }
2386 
2387 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
2388   if (FunctionScopes.empty())
2389     return nullptr;
2390 
2391   auto I = FunctionScopes.rbegin();
2392   if (IgnoreNonLambdaCapturingScope) {
2393     auto E = FunctionScopes.rend();
2394     while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
2395       ++I;
2396     if (I == E)
2397       return nullptr;
2398   }
2399   auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
2400   if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
2401       !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
2402     // We have switched contexts due to template instantiation.
2403     assert(!CodeSynthesisContexts.empty());
2404     return nullptr;
2405   }
2406 
2407   return CurLSI;
2408 }
2409 
2410 // We have a generic lambda if we parsed auto parameters, or we have
2411 // an associated template parameter list.
2412 LambdaScopeInfo *Sema::getCurGenericLambda() {
2413   if (LambdaScopeInfo *LSI =  getCurLambda()) {
2414     return (LSI->TemplateParams.size() ||
2415                     LSI->GLTemplateParameterList) ? LSI : nullptr;
2416   }
2417   return nullptr;
2418 }
2419 
2420 
2421 void Sema::ActOnComment(SourceRange Comment) {
2422   if (!LangOpts.RetainCommentsFromSystemHeaders &&
2423       SourceMgr.isInSystemHeader(Comment.getBegin()))
2424     return;
2425   RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
2426   if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
2427     SourceRange MagicMarkerRange(Comment.getBegin(),
2428                                  Comment.getBegin().getLocWithOffset(3));
2429     StringRef MagicMarkerText;
2430     switch (RC.getKind()) {
2431     case RawComment::RCK_OrdinaryBCPL:
2432       MagicMarkerText = "///<";
2433       break;
2434     case RawComment::RCK_OrdinaryC:
2435       MagicMarkerText = "/**<";
2436       break;
2437     case RawComment::RCK_Invalid:
2438       // FIXME: are there other scenarios that could produce an invalid
2439       // raw comment here?
2440       Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
2441       return;
2442     default:
2443       llvm_unreachable("if this is an almost Doxygen comment, "
2444                        "it should be ordinary");
2445     }
2446     Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
2447       FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
2448   }
2449   Context.addComment(RC);
2450 }
2451 
2452 // Pin this vtable to this file.
2453 ExternalSemaSource::~ExternalSemaSource() {}
2454 char ExternalSemaSource::ID;
2455 
2456 void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
2457 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }
2458 
2459 void ExternalSemaSource::ReadKnownNamespaces(
2460                            SmallVectorImpl<NamespaceDecl *> &Namespaces) {
2461 }
2462 
2463 void ExternalSemaSource::ReadUndefinedButUsed(
2464     llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}
2465 
2466 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
2467     FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2468 
2469 /// Figure out if an expression could be turned into a call.
2470 ///
2471 /// Use this when trying to recover from an error where the programmer may have
2472 /// written just the name of a function instead of actually calling it.
2473 ///
2474 /// \param E - The expression to examine.
2475 /// \param ZeroArgCallReturnTy - If the expression can be turned into a call
2476 ///  with no arguments, this parameter is set to the type returned by such a
2477 ///  call; otherwise, it is set to an empty QualType.
2478 /// \param OverloadSet - If the expression is an overloaded function
2479 ///  name, this parameter is populated with the decls of the various overloads.
2480 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
2481                          UnresolvedSetImpl &OverloadSet) {
2482   ZeroArgCallReturnTy = QualType();
2483   OverloadSet.clear();
2484 
2485   const OverloadExpr *Overloads = nullptr;
2486   bool IsMemExpr = false;
2487   if (E.getType() == Context.OverloadTy) {
2488     OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));
2489 
2490     // Ignore overloads that are pointer-to-member constants.
2491     if (FR.HasFormOfMemberPointer)
2492       return false;
2493 
2494     Overloads = FR.Expression;
2495   } else if (E.getType() == Context.BoundMemberTy) {
2496     Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
2497     IsMemExpr = true;
2498   }
2499 
2500   bool Ambiguous = false;
2501   bool IsMV = false;
2502 
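       // Scan the overload set for candidates callable with zero arguments.
       // If more than one such candidate exists, the zero-argument return type
       // is ambiguous, except that CPU-dispatch / CPU-specific multiversion
       // variants are not treated as conflicting with one another.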
2503   if (Overloads) {
2504     for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
2505          DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
2506       OverloadSet.addDecl(*it);
2507 
2508       // Check whether the function is a non-template, non-member which takes no
2509       // arguments.
2510       if (IsMemExpr)
2511         continue;
2512       if (const FunctionDecl *OverloadDecl
2513             = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
2514         if (OverloadDecl->getMinRequiredArguments() == 0) {
2515           if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
2516               (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
2517                           OverloadDecl->isCPUSpecificMultiVersion()))) {
2518             ZeroArgCallReturnTy = QualType();
2519             Ambiguous = true;
2520           } else {
2521             ZeroArgCallReturnTy = OverloadDecl->getReturnType();
2522             IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
2523                    OverloadDecl->isCPUSpecificMultiVersion();
2524           }
2525         }
2526       }
2527     }
2528 
2529     // If it's not a member, use better machinery to try to resolve the call
2530     if (!IsMemExpr)
2531       return !ZeroArgCallReturnTy.isNull();
2532   }
2533 
2534   // Attempt to call the member with no arguments - this will correctly handle
2535   // member templates with defaults/deduction of template arguments, overloads
2536   // with default arguments, etc.
2537   if (IsMemExpr && !E.isTypeDependent()) {
2538     Sema::TentativeAnalysisScope Trap(*this);
2539     ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
2540                                              std::nullopt, SourceLocation());
2541     if (R.isUsable()) {
2542       ZeroArgCallReturnTy = R.get()->getType();
2543       return true;
2544     }
2545     return false;
2546   }
2547 
2548   if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
2549     if (const auto *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
2550       if (Fun->getMinRequiredArguments() == 0)
2551         ZeroArgCallReturnTy = Fun->getReturnType();
2552       return true;
2553     }
2554   }
2555 
2556   // We don't have an expression that's convenient to get a FunctionDecl from,
2557   // but we can at least check if the type is "function of 0 arguments".
2558   QualType ExprTy = E.getType();
2559   const FunctionType *FunTy = nullptr;
2560   QualType PointeeTy = ExprTy->getPointeeType();
2561   if (!PointeeTy.isNull())
2562     FunTy = PointeeTy->getAs<FunctionType>();
2563   if (!FunTy)
2564     FunTy = ExprTy->getAs<FunctionType>();
2565 
2566   if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(FunTy)) {
2567     if (FPT->getNumParams() == 0)
2568       ZeroArgCallReturnTy = FunTy->getReturnType();
2569     return true;
2570   }
2571   return false;
2572 }
2573 
2574 /// Give notes for a set of overloads.
2575 ///
2576 /// A companion to tryExprAsCall. In cases when the name that the programmer
2577 /// wrote was an overloaded function, we may be able to make some guesses about
2578 /// plausible overloads based on their return types; such guesses can be handed
2579 /// off to this method to be emitted as notes.
2580 ///
2581 /// \param Overloads - The overloads to note.
2582 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2583 ///  -fshow-overloads=best, this is the location to attach to the note about too
2584 ///  many candidates. Typically this will be the location of the original
2585 ///  ill-formed expression.
2586 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2587                           const SourceLocation FinalNoteLoc) {
2588   unsigned ShownOverloads = 0;
2589   unsigned SuppressedOverloads = 0;
2590   for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2591        DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2592     if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2593       ++SuppressedOverloads;
2594       continue;
2595     }
2596 
2597     const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2598     // Don't print overloads for non-default multiversioned functions.
2599     if (const auto *FD = Fn->getAsFunction()) {
2600       if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2601           !FD->getAttr<TargetAttr>()->isDefaultVersion())
2602         continue;
2603       if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2604           !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2605         continue;
2606     }
2607     S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2608     ++ShownOverloads;
2609   }
2610 
2611   S.Diags.overloadCandidatesShown(ShownOverloads);
2612 
2613   if (SuppressedOverloads)
2614     S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2615       << SuppressedOverloads;
2616 }
2617 
2618 static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2619                                    const UnresolvedSetImpl &Overloads,
2620                                    bool (*IsPlausibleResult)(QualType)) {
2621   if (!IsPlausibleResult)
2622     return noteOverloads(S, Overloads, Loc);
2623 
2624   UnresolvedSet<2> PlausibleOverloads;
2625   for (OverloadExpr::decls_iterator It = Overloads.begin(),
2626          DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2627     const auto *OverloadDecl = cast<FunctionDecl>(*It);
2628     QualType OverloadResultTy = OverloadDecl->getReturnType();
2629     if (IsPlausibleResult(OverloadResultTy))
2630       PlausibleOverloads.addDecl(It.getDecl());
2631   }
2632   noteOverloads(S, PlausibleOverloads, Loc);
2633 }
2634 
2635 /// Determine whether the given expression can be called by just
2636 /// putting parentheses after it. Notably, an expression with a unary
2637 /// operator cannot be, because the appended call parentheses would bind to
2638 /// its operand rather than to the whole expression.
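/// For example, appending "()" to "&f" yields "&f()", which parses as
/// "&(f())" rather than "(&f)()".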
2639 static bool IsCallableWithAppend(const Expr *E) {
2640   E = E->IgnoreImplicit();
2641   return (!isa<CStyleCastExpr>(E) &&
2642           !isa<UnaryOperator>(E) &&
2643           !isa<BinaryOperator>(E) &&
2644           !isa<CXXOperatorCallExpr>(E));
2645 }
2646 
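/// Determine whether \p E refers to a cpu_dispatch or cpu_specific
/// multiversioned function, looking through a leading unary operator.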
2647 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2648   if (const auto *UO = dyn_cast<UnaryOperator>(E))
2649     E = UO->getSubExpr();
2650 
2651   if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
2652     if (ULE->getNumDecls() == 0)
2653       return false;
2654 
2655     const NamedDecl *ND = *ULE->decls_begin();
2656     if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2657       return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2658   }
2659   return false;
2660 }
2661 
2662 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
2663                                 bool ForceComplain,
2664                                 bool (*IsPlausibleResult)(QualType)) {
2665   SourceLocation Loc = E.get()->getExprLoc();
2666   SourceRange Range = E.get()->getSourceRange();
2667   UnresolvedSet<4> Overloads;
2668 
2669   // If this is a SFINAE context, don't try anything that might trigger ADL
2670   // prematurely.
2671   if (!isSFINAEContext()) {
2672     QualType ZeroArgCallTy;
2673     if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
2674         !ZeroArgCallTy.isNull() &&
2675         (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
2676       // At this point, we know E is potentially callable with 0
2677       // arguments and that it returns something of a reasonable type,
2678       // so we can emit a fixit and carry on pretending that E was
2679       // actually a CallExpr.
2680       SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
2681       bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2682       Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
2683                     << (IsCallableWithAppend(E.get())
2684                             ? FixItHint::CreateInsertion(ParenInsertionLoc,
2685                                                          "()")
2686                             : FixItHint());
2687       if (!IsMV)
2688         notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2689 
2690       // FIXME: Try this before emitting the fixit, and suppress diagnostics
2691       // while doing so.
2692       E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), std::nullopt,
2693                         Range.getEnd().getLocWithOffset(1));
2694       return true;
2695     }
2696   }
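  // Recovery as a zero-argument call did not apply; only diagnose here if the
  // caller asked us to complain unconditionally.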
2697   if (!ForceComplain) return false;
2698 
2699   bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2700   Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
2701   if (!IsMV)
2702     notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2703   E = ExprError();
2704   return true;
2705 }
2706 
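/// Lazily create and cache the identifier for 'super'.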
2707 IdentifierInfo *Sema::getSuperIdentifier() const {
2708   if (!Ident_super)
2709     Ident_super = &Context.Idents.get("super");
2710   return Ident_super;
2711 }
2712 
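/// Push a function scope for a captured region (e.g. an OpenMP outlined
/// region); the region's return type defaults to void.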
2713 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2714                                    CapturedRegionKind K,
2715                                    unsigned OpenMPCaptureLevel) {
2716   auto *CSI = new CapturedRegionScopeInfo(
2717       getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2718       (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0,
2719       OpenMPCaptureLevel);
2720   CSI->ReturnType = Context.VoidTy;
2721   FunctionScopes.push_back(CSI);
2722   CapturingFunctionScopes++;
2723 }
2724 
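/// Retrieve the innermost function scope if it is a captured region, or null
/// otherwise.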
2725 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2726   if (FunctionScopes.empty())
2727     return nullptr;
2728 
2729   return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
2730 }
2731 
2732 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
2733 Sema::getMismatchingDeleteExpressions() const {
2734   return DeleteExprs;
2735 }
2736 
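/// RAII object that saves the current floating-point state (FP features, the
/// pragma override stack, and the current evaluation method and pragma
/// location) and restores it on destruction.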
2737 Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
2738     : S(S), OldFPFeaturesState(S.CurFPFeatures),
2739       OldOverrides(S.FpPragmaStack.CurrentValue),
2740       OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
2741       OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2742 
2743 Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
2744   S.CurFPFeatures = OldFPFeaturesState;
2745   S.FpPragmaStack.CurrentValue = OldOverrides;
2746   S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
2747 }
2748 
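/// Determine whether a declarator with a qualified name can only be declaring
/// a function-like entity: every prior declaration found by qualified lookup
/// is a function, function template, or using declaration.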
2749 bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2750   assert(D.getCXXScopeSpec().isSet() &&
2751          "can only be called for qualified names");
2752 
2753   auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2754                          LookupOrdinaryName, forRedeclarationInCurContext());
2755   DeclContext *DC = computeDeclContext(D.getCXXScopeSpec(),
2756                                        !D.getDeclSpec().isFriendSpecified());
2757   if (!DC)
2758     return false;
2759 
2760   LookupQualifiedName(LR, DC);
2761   bool Result = std::all_of(LR.begin(), LR.end(), [](Decl *Dcl) {
2762     if (NamedDecl *ND = dyn_cast<NamedDecl>(Dcl)) {
2763       ND = ND->getUnderlyingDecl();
2764       return isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
2765              isa<UsingDecl>(ND);
2766     }
2767     return false;
2768   });
2769   return Result;
2770 }
2771