xref: /freebsd/contrib/llvm-project/clang/lib/Sema/Sema.cpp (revision 7937bfbc0ca53fe7cdd0d54414f9296e273a518e)
1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the actions class which performs semantic analysis and
10 // builds an AST out of a parse stream.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "UsedDeclVisitor.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/ASTDiagnostic.h"
17 #include "clang/AST/Decl.h"
18 #include "clang/AST/DeclCXX.h"
19 #include "clang/AST/DeclFriend.h"
20 #include "clang/AST/DeclObjC.h"
21 #include "clang/AST/Expr.h"
22 #include "clang/AST/ExprCXX.h"
23 #include "clang/AST/PrettyDeclStackTrace.h"
24 #include "clang/AST/StmtCXX.h"
25 #include "clang/Basic/DarwinSDKInfo.h"
26 #include "clang/Basic/DiagnosticOptions.h"
27 #include "clang/Basic/PartialDiagnostic.h"
28 #include "clang/Basic/SourceManager.h"
29 #include "clang/Basic/Stack.h"
30 #include "clang/Basic/TargetInfo.h"
31 #include "clang/Lex/HeaderSearch.h"
32 #include "clang/Lex/HeaderSearchOptions.h"
33 #include "clang/Lex/Preprocessor.h"
34 #include "clang/Sema/CXXFieldCollector.h"
35 #include "clang/Sema/DelayedDiagnostic.h"
36 #include "clang/Sema/EnterExpressionEvaluationContext.h"
37 #include "clang/Sema/ExternalSemaSource.h"
38 #include "clang/Sema/Initialization.h"
39 #include "clang/Sema/MultiplexExternalSemaSource.h"
40 #include "clang/Sema/ObjCMethodList.h"
41 #include "clang/Sema/RISCVIntrinsicManager.h"
42 #include "clang/Sema/Scope.h"
43 #include "clang/Sema/ScopeInfo.h"
44 #include "clang/Sema/SemaAMDGPU.h"
45 #include "clang/Sema/SemaARM.h"
46 #include "clang/Sema/SemaAVR.h"
47 #include "clang/Sema/SemaBPF.h"
48 #include "clang/Sema/SemaCUDA.h"
49 #include "clang/Sema/SemaCodeCompletion.h"
50 #include "clang/Sema/SemaConsumer.h"
51 #include "clang/Sema/SemaHLSL.h"
52 #include "clang/Sema/SemaHexagon.h"
53 #include "clang/Sema/SemaInternal.h"
54 #include "clang/Sema/SemaLoongArch.h"
55 #include "clang/Sema/SemaM68k.h"
56 #include "clang/Sema/SemaMIPS.h"
57 #include "clang/Sema/SemaMSP430.h"
58 #include "clang/Sema/SemaNVPTX.h"
59 #include "clang/Sema/SemaObjC.h"
60 #include "clang/Sema/SemaOpenACC.h"
61 #include "clang/Sema/SemaOpenCL.h"
62 #include "clang/Sema/SemaOpenMP.h"
63 #include "clang/Sema/SemaPPC.h"
64 #include "clang/Sema/SemaPseudoObject.h"
65 #include "clang/Sema/SemaRISCV.h"
66 #include "clang/Sema/SemaSYCL.h"
67 #include "clang/Sema/SemaSwift.h"
68 #include "clang/Sema/SemaSystemZ.h"
69 #include "clang/Sema/SemaWasm.h"
70 #include "clang/Sema/SemaX86.h"
71 #include "clang/Sema/TemplateDeduction.h"
72 #include "clang/Sema/TemplateInstCallback.h"
73 #include "clang/Sema/TypoCorrection.h"
74 #include "llvm/ADT/DenseMap.h"
75 #include "llvm/ADT/STLExtras.h"
76 #include "llvm/ADT/SmallPtrSet.h"
77 #include "llvm/Support/TimeProfiler.h"
78 #include <optional>
79 
80 using namespace clang;
81 using namespace sema;
82 
83 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
84   return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
85 }
86 
87 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
88 
89 DarwinSDKInfo *
90 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
91                                               StringRef Platform) {
92   auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
93   if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
94     Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
95         << Platform;
96     WarnedDarwinSDKInfoMissing = true;
97   }
98   return SDKInfo;
99 }
100 
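// Added note (not in upstream): parse SDKSettings from the configured sysroot
// the first time this is called and cache the result; returns null (and
// caches the failure) if the SDK information could not be loaded.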
101 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
102   if (CachedDarwinSDKInfo)
103     return CachedDarwinSDKInfo->get();
104   auto SDKInfo = parseDarwinSDKInfo(
105       PP.getFileManager().getVirtualFileSystem(),
106       PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
107   if (SDKInfo && *SDKInfo) {
108     CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
109     return CachedDarwinSDKInfo->get();
110   }
111   if (!SDKInfo)
112     llvm::consumeError(SDKInfo.takeError());
113   CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
114   return nullptr;
115 }
116 
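// Added note (not in upstream): invent a display name for the template
// parameter corresponding to an 'auto' function parameter: "auto:<index+1>"
// if the parameter is unnamed, otherwise "<name>:auto".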
117 IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName(
118     const IdentifierInfo *ParamName, unsigned int Index) {
119   std::string InventedName;
120   llvm::raw_string_ostream OS(InventedName);
121 
122   if (!ParamName)
123     OS << "auto:" << Index + 1;
124   else
125     OS << ParamName->getName() << ":auto";
126 
127   OS.flush();
128   return &Context.Idents.get(OS.str());
129 }
130 
131 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
132                                        const Preprocessor &PP) {
133   PrintingPolicy Policy = Context.getPrintingPolicy();
134   // In diagnostics, we print _Bool as bool if the latter is defined as the
135   // former.
136   Policy.Bool = Context.getLangOpts().Bool;
137   if (!Policy.Bool) {
138     if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
139       Policy.Bool = BoolMacro->isObjectLike() &&
140                     BoolMacro->getNumTokens() == 1 &&
141                     BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
142     }
143   }
144 
145   // Shorten the data output if needed
146   Policy.EntireContentsOfLargeArray = false;
147 
148   return Policy;
149 }
150 
151 void Sema::ActOnTranslationUnitScope(Scope *S) {
152   TUScope = S;
153   PushDeclContext(S, Context.getTranslationUnitDecl());
154 }
155 
156 namespace clang {
157 namespace sema {
158 
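// Added note (not in upstream): preprocessor callbacks used by Sema to track
// #include entry/exit. They emit per-file time-trace entries when profiling
// is enabled and diagnose #pragma pack/align state that is non-default at an
// include boundary or left changed when the included file is exited.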
159 class SemaPPCallbacks : public PPCallbacks {
160   Sema *S = nullptr;
161   llvm::SmallVector<SourceLocation, 8> IncludeStack;
162   llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;
163 
164 public:
165   void set(Sema &S) { this->S = &S; }
166 
167   void reset() { S = nullptr; }
168 
169   void FileChanged(SourceLocation Loc, FileChangeReason Reason,
170                    SrcMgr::CharacteristicKind FileType,
171                    FileID PrevFID) override {
172     if (!S)
173       return;
174     switch (Reason) {
175     case EnterFile: {
176       SourceManager &SM = S->getSourceManager();
177       SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
178       if (IncludeLoc.isValid()) {
179         if (llvm::timeTraceProfilerEnabled()) {
180           OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
181           ProfilerStack.push_back(llvm::timeTraceAsyncProfilerBegin(
182               "Source", FE ? FE->getName() : StringRef("<unknown>")));
183         }
184 
185         IncludeStack.push_back(IncludeLoc);
186         S->DiagnoseNonDefaultPragmaAlignPack(
187             Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
188             IncludeLoc);
189       }
190       break;
191     }
192     case ExitFile:
193       if (!IncludeStack.empty()) {
194         if (llvm::timeTraceProfilerEnabled())
195           llvm::timeTraceProfilerEnd(ProfilerStack.pop_back_val());
196 
197         S->DiagnoseNonDefaultPragmaAlignPack(
198             Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
199             IncludeStack.pop_back_val());
200       }
201       break;
202     default:
203       break;
204     }
205   }
206 };
207 
208 } // end namespace sema
209 } // end namespace clang
210 
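// Added note (not in upstream): out-of-line definitions for Sema's static
// constexpr members, so they have a definition when odr-used under
// pre-C++17 rules where such members are not implicitly inline.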
211 const unsigned Sema::MaxAlignmentExponent;
212 const uint64_t Sema::MaximumAlignment;
213 
214 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
215            TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
216     : SemaBase(*this), CollectStats(false), TUKind(TUKind),
217       CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
218       Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
219       SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts),
220       AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
221       LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr),
222       OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr),
223       CurScope(nullptr), Ident_super(nullptr),
224       AMDGPUPtr(std::make_unique<SemaAMDGPU>(*this)),
225       ARMPtr(std::make_unique<SemaARM>(*this)),
226       AVRPtr(std::make_unique<SemaAVR>(*this)),
227       BPFPtr(std::make_unique<SemaBPF>(*this)),
228       CodeCompletionPtr(
229           std::make_unique<SemaCodeCompletion>(*this, CodeCompleter)),
230       CUDAPtr(std::make_unique<SemaCUDA>(*this)),
231       HLSLPtr(std::make_unique<SemaHLSL>(*this)),
232       HexagonPtr(std::make_unique<SemaHexagon>(*this)),
233       LoongArchPtr(std::make_unique<SemaLoongArch>(*this)),
234       M68kPtr(std::make_unique<SemaM68k>(*this)),
235       MIPSPtr(std::make_unique<SemaMIPS>(*this)),
236       MSP430Ptr(std::make_unique<SemaMSP430>(*this)),
237       NVPTXPtr(std::make_unique<SemaNVPTX>(*this)),
238       ObjCPtr(std::make_unique<SemaObjC>(*this)),
239       OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
240       OpenCLPtr(std::make_unique<SemaOpenCL>(*this)),
241       OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
242       PPCPtr(std::make_unique<SemaPPC>(*this)),
243       PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)),
244       RISCVPtr(std::make_unique<SemaRISCV>(*this)),
245       SYCLPtr(std::make_unique<SemaSYCL>(*this)),
246       SwiftPtr(std::make_unique<SemaSwift>(*this)),
247       SystemZPtr(std::make_unique<SemaSystemZ>(*this)),
248       WasmPtr(std::make_unique<SemaWasm>(*this)),
249       X86Ptr(std::make_unique<SemaX86>(*this)),
250       MSPointerToMemberRepresentationMethod(
251           LangOpts.getMSPointerToMemberRepresentationMethod()),
252       MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
253       AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
254       DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
255       CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
256       FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
257       VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
258       StdCoroutineTraitsCache(nullptr), IdResolver(pp),
259       OriginalLexicalContext(nullptr), StdInitializerList(nullptr),
260       FullyCheckedComparisonCategories(
261           static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
262       StdSourceLocationImplDecl(nullptr), CXXTypeInfoDecl(nullptr),
263       GlobalNewDeleteDeclared(false), DisableTypoCorrection(false),
264       TyposCorrected(0), IsBuildingRecoveryCallExpr(false), NumSFINAEErrors(0),
265       AccessCheckingSFINAE(false), CurrentInstantiationScope(nullptr),
266       InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
267       ArgumentPackSubstitutionIndex(-1), SatisfactionCache(Context) {
268   assert(pp.TUKind == TUKind);
269   TUScope = nullptr;
270 
271   LoadedExternalKnownNamespaces = false;
272   for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
273     ObjC().NSNumberLiteralMethods[I] = nullptr;
274 
275   if (getLangOpts().ObjC)
276     ObjC().NSAPIObj.reset(new NSAPI(Context));
277 
278   if (getLangOpts().CPlusPlus)
279     FieldCollector.reset(new CXXFieldCollector());
280 
281   // Tell diagnostics how to render things from the AST library.
282   Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
283 
284   // This evaluation context exists to ensure that there's always at least one
285   // valid evaluation context available. It is never removed from the
286   // evaluation stack.
287   ExprEvalContexts.emplace_back(
288       ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
289       nullptr, ExpressionEvaluationContextRecord::EK_Other);
290 
291   // Initialization of data sharing attributes stack for OpenMP
292   OpenMP().InitDataSharingAttributesStack();
293 
294   std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
295       std::make_unique<sema::SemaPPCallbacks>();
296   SemaPPCallbackHandler = Callbacks.get();
297   PP.addPPCallbacks(std::move(Callbacks));
298   SemaPPCallbackHandler->set(*this);
299 
300   CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
301 }
302 
303 // Anchor Sema's type info to this TU.
304 void Sema::anchor() {}
305 
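// Added note (not in upstream): add an implicit typedef 'Name' for type T at
// translation-unit scope, unless the identifier is already bound.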
306 void Sema::addImplicitTypedef(StringRef Name, QualType T) {
307   DeclarationName DN = &Context.Idents.get(Name);
308   if (IdResolver.begin(DN) == IdResolver.end())
309     PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
310 }
311 
312 void Sema::Initialize() {
313   if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
314     SC->InitializeSema(*this);
315 
316   // Tell the external Sema source about this Sema object.
317   if (ExternalSemaSource *ExternalSema
318       = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
319     ExternalSema->InitializeSema(*this);
320 
321   // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
322   // will not be able to merge any duplicate __va_list_tag decls correctly.
323   VAListTagName = PP.getIdentifierInfo("__va_list_tag");
324 
325   if (!TUScope)
326     return;
327 
328   // Initialize predefined 128-bit integer types, if needed.
329   if (Context.getTargetInfo().hasInt128Type() ||
330       (Context.getAuxTargetInfo() &&
331        Context.getAuxTargetInfo()->hasInt128Type())) {
332     // If either of the 128-bit integer types is unavailable to name lookup,
333     // define it now.
334     DeclarationName Int128 = &Context.Idents.get("__int128_t");
335     if (IdResolver.begin(Int128) == IdResolver.end())
336       PushOnScopeChains(Context.getInt128Decl(), TUScope);
337 
338     DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
339     if (IdResolver.begin(UInt128) == IdResolver.end())
340       PushOnScopeChains(Context.getUInt128Decl(), TUScope);
341   }
342 
343 
344   // Initialize predefined Objective-C types:
345   if (getLangOpts().ObjC) {
346     // If 'SEL' does not yet refer to any declarations, make it refer to the
347     // predefined 'SEL'.
348     DeclarationName SEL = &Context.Idents.get("SEL");
349     if (IdResolver.begin(SEL) == IdResolver.end())
350       PushOnScopeChains(Context.getObjCSelDecl(), TUScope);
351 
352     // If 'id' does not yet refer to any declarations, make it refer to the
353     // predefined 'id'.
354     DeclarationName Id = &Context.Idents.get("id");
355     if (IdResolver.begin(Id) == IdResolver.end())
356       PushOnScopeChains(Context.getObjCIdDecl(), TUScope);
357 
358     // Create the built-in typedef for 'Class'.
359     DeclarationName Class = &Context.Idents.get("Class");
360     if (IdResolver.begin(Class) == IdResolver.end())
361       PushOnScopeChains(Context.getObjCClassDecl(), TUScope);
362 
363     // Create the built-in forward declaration for 'Protocol'.
364     DeclarationName Protocol = &Context.Idents.get("Protocol");
365     if (IdResolver.begin(Protocol) == IdResolver.end())
366       PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
367   }
368 
369   // Create the internal type for the *StringMakeConstantString builtins.
370   DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
371   if (IdResolver.begin(ConstantString) == IdResolver.end())
372     PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);
373 
374   // Initialize Microsoft "predefined C++ types".
375   if (getLangOpts().MSVCCompat) {
376     if (getLangOpts().CPlusPlus &&
377         IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
378       PushOnScopeChains(
379           Context.buildImplicitRecord("type_info", TagTypeKind::Class),
380           TUScope);
381 
382     addImplicitTypedef("size_t", Context.getSizeType());
383   }
384 
385   // Initialize predefined OpenCL types and supported extensions and (optional)
386   // core features.
387   if (getLangOpts().OpenCL) {
388     getOpenCLOptions().addSupport(
389         Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
390     addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
391     addImplicitTypedef("event_t", Context.OCLEventTy);
392     auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
393     if (OCLCompatibleVersion >= 200) {
394       if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
395         addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
396         addImplicitTypedef("queue_t", Context.OCLQueueTy);
397       }
398       if (getLangOpts().OpenCLPipes)
399         addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
400       addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
401       addImplicitTypedef("atomic_uint",
402                          Context.getAtomicType(Context.UnsignedIntTy));
403       addImplicitTypedef("atomic_float",
404                          Context.getAtomicType(Context.FloatTy));
405       // OpenCL C v2.0 s6.13.11.6 requires atomic_flag to be implemented as a
406       // 32-bit integer, and per OpenCL C v2.0 s6.1.1, int is always 32 bits wide.
407       addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
408 
409 
410       // OpenCL v2.0 s6.13.11.6:
411       // - The atomic_long and atomic_ulong types are supported if the
412       //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
413       //   extensions are supported.
414       // - The atomic_double type is only supported if double precision
415       //   is supported and the cl_khr_int64_base_atomics and
416       //   cl_khr_int64_extended_atomics extensions are supported.
417       // - If the device address space is 64-bits, the data types
418       //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
419       //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
420       //   cl_khr_int64_extended_atomics extensions are supported.
421 
422       auto AddPointerSizeDependentTypes = [&]() {
423         auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
424         auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
425         auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
426         auto AtomicPtrDiffT =
427             Context.getAtomicType(Context.getPointerDiffType());
428         addImplicitTypedef("atomic_size_t", AtomicSizeT);
429         addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
430         addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
431         addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
432       };
433 
434       if (Context.getTypeSize(Context.getSizeType()) == 32) {
435         AddPointerSizeDependentTypes();
436       }
437 
438       if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
439         auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
440         addImplicitTypedef("atomic_half", AtomicHalfT);
441       }
442 
443       std::vector<QualType> Atomic64BitTypes;
444       if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
445                                          getLangOpts()) &&
446           getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
447                                          getLangOpts())) {
448         if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
449           auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
450           addImplicitTypedef("atomic_double", AtomicDoubleT);
451           Atomic64BitTypes.push_back(AtomicDoubleT);
452         }
453         auto AtomicLongT = Context.getAtomicType(Context.LongTy);
454         auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
455         addImplicitTypedef("atomic_long", AtomicLongT);
456         addImplicitTypedef("atomic_ulong", AtomicULongT);
457 
458 
459         if (Context.getTypeSize(Context.getSizeType()) == 64) {
460           AddPointerSizeDependentTypes();
461         }
462       }
463     }
464 
465 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
466   if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) {                   \
467     addImplicitTypedef(#ExtType, Context.Id##Ty);                              \
468   }
469 #include "clang/Basic/OpenCLExtensionTypes.def"
470   }
471 
472   if (Context.getTargetInfo().hasAArch64SVETypes() ||
473       (Context.getAuxTargetInfo() &&
474        Context.getAuxTargetInfo()->hasAArch64SVETypes())) {
475 #define SVE_TYPE(Name, Id, SingletonId) \
476     addImplicitTypedef(Name, Context.SingletonId);
477 #include "clang/Basic/AArch64SVEACLETypes.def"
478   }
479 
480   if (Context.getTargetInfo().getTriple().isPPC64()) {
481 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
482       addImplicitTypedef(#Name, Context.Id##Ty);
483 #include "clang/Basic/PPCTypes.def"
484 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
485     addImplicitTypedef(#Name, Context.Id##Ty);
486 #include "clang/Basic/PPCTypes.def"
487   }
488 
489   if (Context.getTargetInfo().hasRISCVVTypes()) {
490 #define RVV_TYPE(Name, Id, SingletonId)                                        \
491   addImplicitTypedef(Name, Context.SingletonId);
492 #include "clang/Basic/RISCVVTypes.def"
493   }
494 
495   if (Context.getTargetInfo().getTriple().isWasm() &&
496       Context.getTargetInfo().hasFeature("reference-types")) {
497 #define WASM_TYPE(Name, Id, SingletonId)                                       \
498   addImplicitTypedef(Name, Context.SingletonId);
499 #include "clang/Basic/WebAssemblyReferenceTypes.def"
500   }
501 
502   if (Context.getTargetInfo().getTriple().isAMDGPU() ||
503       (Context.getAuxTargetInfo() &&
504        Context.getAuxTargetInfo()->getTriple().isAMDGPU())) {
505 #define AMDGPU_TYPE(Name, Id, SingletonId)                                     \
506   addImplicitTypedef(Name, Context.SingletonId);
507 #include "clang/Basic/AMDGPUTypes.def"
508   }
509 
510   if (Context.getTargetInfo().hasBuiltinMSVaList()) {
511     DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
512     if (IdResolver.begin(MSVaList) == IdResolver.end())
513       PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
514   }
515 
516   DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
517   if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
518     PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
519 }
520 
521 Sema::~Sema() {
522   assert(InstantiatingSpecializations.empty() &&
523          "failed to clean up an InstantiatingTemplate?");
524 
525   if (VisContext) FreeVisContext();
526 
527   // Kill all the active scopes.
528   for (sema::FunctionScopeInfo *FSI : FunctionScopes)
529     delete FSI;
530 
531   // Tell the SemaConsumer to forget about us; we're going out of scope.
532   if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
533     SC->ForgetSema();
534 
535   // Detach from the external Sema source.
536   if (ExternalSemaSource *ExternalSema
537         = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
538     ExternalSema->ForgetSema();
539 
540   // Delete cached satisfactions.
541   std::vector<ConstraintSatisfaction *> Satisfactions;
542   Satisfactions.reserve(SatisfactionCache.size());
543   for (auto &Node : SatisfactionCache)
544     Satisfactions.push_back(&Node);
545   for (auto *Node : Satisfactions)
546     delete Node;
547 
548   threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);
549 
550   // Destroys data sharing attributes stack for OpenMP
551   OpenMP().DestroyDataSharingAttributesStack();
552 
553   // Detach from the PP callback handler which outlives Sema since it's owned
554   // by the preprocessor.
555   SemaPPCallbackHandler->reset();
556 }
557 
558 void Sema::warnStackExhausted(SourceLocation Loc) {
559   // Only warn about this once.
560   if (!WarnedStackExhausted) {
561     Diag(Loc, diag::warn_stack_exhausted);
562     WarnedStackExhausted = true;
563   }
564 }
565 
566 void Sema::runWithSufficientStackSpace(SourceLocation Loc,
567                                        llvm::function_ref<void()> Fn) {
568   clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
569 }
570 
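// Added note (not in upstream): if the current context is a function in a
// system header (and we are not in a template instantiation), implicitly mark
// that function unavailable with the given reason. Returns true if the
// enclosing function is (or has now been) marked unavailable.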
571 bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
572                                       UnavailableAttr::ImplicitReason reason) {
573   // If we're not in a function, it's an error.
574   FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
575   if (!fn) return false;
576 
577   // If we're in template instantiation, it's an error.
578   if (inTemplateInstantiation())
579     return false;
580 
581   // If that function's not in a system header, it's an error.
582   if (!Context.getSourceManager().isInSystemHeader(loc))
583     return false;
584 
585   // If the function is already unavailable, it's not an error.
586   if (fn->hasAttr<UnavailableAttr>()) return true;
587 
588   fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
589   return true;
590 }
591 
592 ASTMutationListener *Sema::getASTMutationListener() const {
593   return getASTConsumer().GetASTMutationListener();
594 }
595 
596 void Sema::addExternalSource(ExternalSemaSource *E) {
597   assert(E && "Cannot use with NULL ptr");
598 
599   if (!ExternalSource) {
600     ExternalSource = E;
601     return;
602   }
603 
604   if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource))
605     Ex->AddSource(E);
606   else
607     ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
608 }
609 
610 void Sema::PrintStats() const {
611   llvm::errs() << "\n*** Semantic Analysis Stats:\n";
612   llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
613 
614   BumpAlloc.PrintStats();
615   AnalysisWarnings.PrintStats();
616 }
617 
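// Added note (not in upstream): warn when a value of _Nullable (or
// _Nullable_result) type is implicitly converted to a _Nonnull type, since
// nullability information is lost.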
618 void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
619                                                QualType SrcType,
620                                                SourceLocation Loc) {
621   std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
622   if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
623                            *ExprNullability != NullabilityKind::NullableResult))
624     return;
625 
626   std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
627   if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
628     return;
629 
630   Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
631 }
632 
633 // Generate diagnostics when adding or removing effects in a type conversion.
634 void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType,
635                                             SourceLocation Loc) {
636   const auto SrcFX = FunctionEffectsRef::get(SrcType);
637   const auto DstFX = FunctionEffectsRef::get(DstType);
638   if (SrcFX != DstFX) {
639     for (const auto &Diff : FunctionEffectDifferences(SrcFX, DstFX)) {
640       if (Diff.shouldDiagnoseConversion(SrcType, SrcFX, DstType, DstFX))
641         Diag(Loc, diag::warn_invalid_add_func_effects) << Diff.effectName();
642     }
643   }
644 }
645 
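// Added note (not in upstream): diagnose uses of a literal 0 (rather than
// nullptr) as a null pointer constant in C++11 and later, with a fix-it
// suggesting 'nullptr'.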
646 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
647   // nullptr only exists from C++11 on, so don't warn on its absence earlier.
648   if (!getLangOpts().CPlusPlus11)
649     return;
650 
651   if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
652     return;
653 
654   const Expr *EStripped = E->IgnoreParenImpCasts();
655   if (EStripped->getType()->isNullPtrType())
656     return;
657   if (isa<GNUNullExpr>(EStripped))
658     return;
659 
660   if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
661                       E->getBeginLoc()))
662     return;
663 
664   // Don't diagnose the conversion from a 0 literal to a null pointer argument
665   // in a synthesized call to operator<=>.
666   if (!CodeSynthesisContexts.empty() &&
667       CodeSynthesisContexts.back().Kind ==
668           CodeSynthesisContext::RewritingOperatorAsSpaceship)
669     return;
670 
671   // Ignore null pointers in defaulted comparison operators.
672   FunctionDecl *FD = getCurFunctionDecl();
673   if (FD && FD->isDefaulted()) {
674     return;
675   }
676 
677   // If it is a macro from a system header and the macro name is not "NULL",
678   // do not warn.
679   // Note that uses of "NULL" will be ignored above on systems that define it
680   // as __null.
681   SourceLocation MaybeMacroLoc = E->getBeginLoc();
682   if (Diags.getSuppressSystemWarnings() &&
683       SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
684       !findMacroSpelling(MaybeMacroLoc, "NULL"))
685     return;
686 
687   Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
688       << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
689 }
690 
691 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
692 /// If there is already an implicit cast, merge into the existing one.
693 /// The result is of the given category.
694 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
695                                    CastKind Kind, ExprValueKind VK,
696                                    const CXXCastPath *BasePath,
697                                    CheckedConversionKind CCK) {
698 #ifndef NDEBUG
699   if (VK == VK_PRValue && !E->isPRValue()) {
700     switch (Kind) {
701     default:
702       llvm_unreachable(
703           ("can't implicitly cast glvalue to prvalue with this cast "
704            "kind: " +
705            std::string(CastExpr::getCastKindName(Kind)))
706               .c_str());
707     case CK_Dependent:
708     case CK_LValueToRValue:
709     case CK_ArrayToPointerDecay:
710     case CK_FunctionToPointerDecay:
711     case CK_ToVoid:
712     case CK_NonAtomicToAtomic:
713     case CK_HLSLArrayRValue:
714       break;
715     }
716   }
717   assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
718          "can't cast prvalue to glvalue");
719 #endif
720 
721   diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
722   diagnoseZeroToNullptrConversion(Kind, E);
723   if (Context.hasAnyFunctionEffects() && !isCast(CCK) &&
724       Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
725     diagnoseFunctionEffectConversion(Ty, E->getType(), E->getBeginLoc());
726 
727   QualType ExprTy = Context.getCanonicalType(E->getType());
728   QualType TypeTy = Context.getCanonicalType(Ty);
729 
730   if (ExprTy == TypeTy)
731     return E;
732 
733   if (Kind == CK_ArrayToPointerDecay) {
734     // C++1z [conv.array]: The temporary materialization conversion is applied.
735     // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
736     if (getLangOpts().CPlusPlus && E->isPRValue()) {
737       // The temporary is an lvalue in C++98 and an xvalue otherwise.
738       ExprResult Materialized = CreateMaterializeTemporaryExpr(
739           E->getType(), E, !getLangOpts().CPlusPlus11);
740       if (Materialized.isInvalid())
741         return ExprError();
742       E = Materialized.get();
743     }
744     // C17 6.7.1p6 footnote 124: The implementation can treat any register
745     // declaration simply as an auto declaration. However, whether or not
746     // addressable storage is actually used, the address of any part of an
747     // object declared with storage-class specifier register cannot be
748     // computed, either explicitly (by use of the unary & operator as discussed
749     // in 6.5.3.2) or implicitly (by converting an array name to a pointer as
750     // discussed in 6.3.2.1). Thus, the only operator that can be applied to an
751     // array declared with storage-class specifier register is sizeof.
752     if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
753       if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
754         if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
755           if (VD->getStorageClass() == SC_Register) {
756             Diag(E->getExprLoc(), diag::err_typecheck_address_of)
757                 << /*register variable*/ 3 << E->getSourceRange();
758             return ExprError();
759           }
760         }
761       }
762     }
763   }
764 
765   if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
766     if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
767       ImpCast->setType(Ty);
768       ImpCast->setValueKind(VK);
769       return E;
770     }
771   }
772 
773   return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
774                                   CurFPFeatureOverrides());
775 }
776 
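// Added note (not in upstream): map a scalar type to the cast kind used when
// converting a value of that type to bool.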
777 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
778   switch (ScalarTy->getScalarTypeKind()) {
779   case Type::STK_Bool: return CK_NoOp;
780   case Type::STK_CPointer: return CK_PointerToBoolean;
781   case Type::STK_BlockPointer: return CK_PointerToBoolean;
782   case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
783   case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
784   case Type::STK_Integral: return CK_IntegralToBoolean;
785   case Type::STK_Floating: return CK_FloatingToBoolean;
786   case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
787   case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
788   case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
789   }
790   llvm_unreachable("unknown scalar type kind");
791 }
792 
793 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
794 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
795   if (D->getMostRecentDecl()->isUsed())
796     return true;
797 
798   if (D->isExternallyVisible())
799     return true;
800 
801   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
802     // If this is a function template and none of its specializations is used,
803     // we should warn.
804     if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
805       for (const auto *Spec : Template->specializations())
806         if (ShouldRemoveFromUnused(SemaRef, Spec))
807           return true;
808 
809     // UnusedFileScopedDecls stores the first declaration.
810     // The declaration may have become a definition, so check again.
811     const FunctionDecl *DeclToCheck;
812     if (FD->hasBody(DeclToCheck))
813       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
814 
815     // Later redecls may add new information resulting in not having to warn,
816     // so check again.
817     DeclToCheck = FD->getMostRecentDecl();
818     if (DeclToCheck != FD)
819       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
820   }
821 
822   if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
823     // If a variable usable in constant expressions is referenced,
824     // don't warn if it isn't used: if the value of a variable is required
825     // for the computation of a constant expression, it doesn't make sense to
826     // warn even if the variable isn't odr-used.  (isReferenced doesn't
827     // precisely reflect that, but it's a decent approximation.)
828     if (VD->isReferenced() &&
829         VD->mightBeUsableInConstantExpressions(SemaRef->Context))
830       return true;
831 
832     if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
833       // If this is a variable template and none of its specializations is used,
834       // we should warn.
835       for (const auto *Spec : Template->specializations())
836         if (ShouldRemoveFromUnused(SemaRef, Spec))
837           return true;
838 
839     // UnusedFileScopedDecls stores the first declaration.
840     // The declaration may have become a definition, so check again.
841     const VarDecl *DeclToCheck = VD->getDefinition();
842     if (DeclToCheck)
843       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
844 
845     // Later redecls may add new information resulting in not having to warn,
846     // so check again.
847     DeclToCheck = VD->getMostRecentDecl();
848     if (DeclToCheck != VD)
849       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
850   }
851 
852   return false;
853 }
854 
855 static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
856   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
857     return FD->isExternC();
858   return cast<VarDecl>(ND)->isExternC();
859 }
860 
861 /// Determine whether ND is an external-linkage function or variable whose
862 /// type has no linkage.
863 bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
864   // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
865   // because we also want to catch the case where its type has VisibleNoLinkage,
866   // which does not affect the linkage of VD.
867   return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
868          !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
869          !isFunctionOrVarDeclExternC(VD);
870 }
871 
872 /// Obtains a sorted list of functions and variables that are undefined but
873 /// ODR-used.
874 void Sema::getUndefinedButUsed(
875     SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
876   for (const auto &UndefinedUse : UndefinedButUsed) {
877     NamedDecl *ND = UndefinedUse.first;
878 
879     // Ignore declarations that have become invalid.
880     if (ND->isInvalidDecl()) continue;
881 
882     // __attribute__((weakref)) is basically a definition.
883     if (ND->hasAttr<WeakRefAttr>()) continue;
884 
885     if (isa<CXXDeductionGuideDecl>(ND))
886       continue;
887 
888     if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
889       // An exported function will always be emitted when defined, so even if
890       // the function is inline, it doesn't have to be emitted in this TU. An
891       // imported function implies that it has been exported somewhere else.
892       continue;
893     }
894 
895     if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
896       if (FD->isDefined())
897         continue;
898       if (FD->isExternallyVisible() &&
899           !isExternalWithNoLinkageType(FD) &&
900           !FD->getMostRecentDecl()->isInlined() &&
901           !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
902         continue;
903       if (FD->getBuiltinID())
904         continue;
905     } else {
906       const auto *VD = cast<VarDecl>(ND);
907       if (VD->hasDefinition() != VarDecl::DeclarationOnly)
908         continue;
909       if (VD->isExternallyVisible() &&
910           !isExternalWithNoLinkageType(VD) &&
911           !VD->getMostRecentDecl()->isInline() &&
912           !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
913         continue;
914 
915       // Skip VarDecls that lack formal definitions but which we know are in
916       // fact defined somewhere.
917       if (VD->isKnownToBeDefined())
918         continue;
919     }
920 
921     Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
922   }
923 }
924 
925 /// checkUndefinedButUsed - Check for undefined objects with internal linkage
926 /// or that are inline.
927 static void checkUndefinedButUsed(Sema &S) {
928   if (S.UndefinedButUsed.empty()) return;
929 
930   // Collect all the still-undefined entities with internal linkage.
931   SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
932   S.getUndefinedButUsed(Undefined);
933   S.UndefinedButUsed.clear();
934   if (Undefined.empty()) return;
935 
936   for (const auto &Undef : Undefined) {
937     ValueDecl *VD = cast<ValueDecl>(Undef.first);
938     SourceLocation UseLoc = Undef.second;
939 
940     if (S.isExternalWithNoLinkageType(VD)) {
941       // C++ [basic.link]p8:
942       //   A type without linkage shall not be used as the type of a variable
943       //   or function with external linkage unless
944       //    -- the entity has C language linkage
945       //    -- the entity is not odr-used or is defined in the same TU
946       //
947       // As an extension, accept this in cases where the type is externally
948       // visible, since the function or variable actually can be defined in
949       // another translation unit in that case.
950       S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
951                                     ? diag::ext_undefined_internal_type
952                                     : diag::err_undefined_internal_type)
953         << isa<VarDecl>(VD) << VD;
954     } else if (!VD->isExternallyVisible()) {
955       // FIXME: We can promote this to an error. The function or variable can't
956       // be defined anywhere else, so the program must necessarily violate the
957       // one definition rule.
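      // Added note (not in upstream): OpenMP 'declare variant' may introduce
      // an implicit base function whose definition is supplied by a mangled
      // variant; such implicit bases are expected to be undefined, so detect
      // them below and skip the warning.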
958       bool IsImplicitBase = false;
959       if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
960         auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
961         if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
962                           llvm::omp::TraitProperty::
963                               implementation_extension_disable_implicit_base)) {
964           const auto *Func = cast<FunctionDecl>(
965               cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
966           IsImplicitBase = BaseD->isImplicit() &&
967                            Func->getIdentifier()->isMangledOpenMPVariantName();
968         }
969       }
970       if (!S.getLangOpts().OpenMP || !IsImplicitBase)
971         S.Diag(VD->getLocation(), diag::warn_undefined_internal)
972             << isa<VarDecl>(VD) << VD;
973     } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
974       (void)FD;
975       assert(FD->getMostRecentDecl()->isInlined() &&
976              "used object requires definition but isn't inline or internal?");
977       // FIXME: This is ill-formed; we should reject.
978       S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
979     } else {
980       assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
981              "used var requires definition but isn't inline or internal?");
982       S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
983     }
984     if (UseLoc.isValid())
985       S.Diag(UseLoc, diag::note_used_here);
986   }
987 }
988 
989 void Sema::LoadExternalWeakUndeclaredIdentifiers() {
990   if (!ExternalSource)
991     return;
992 
993   SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
994   ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
995   for (auto &WeakID : WeakIDs)
996     (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
997 }
998 
999 
1000 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
1001 
1002 /// Returns true if all methods and nested classes of the given
1003 /// CXXRecordDecl are defined in this translation unit.
1004 ///
1005 /// Should only be called from ActOnEndOfTranslationUnit so that all
1006 /// definitions are actually read.
1007 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
1008                                             RecordCompleteMap &MNCComplete) {
1009   RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
1010   if (Cache != MNCComplete.end())
1011     return Cache->second;
1012   if (!RD->isCompleteDefinition())
1013     return false;
1014   bool Complete = true;
1015   for (DeclContext::decl_iterator I = RD->decls_begin(),
1016                                   E = RD->decls_end();
1017        I != E && Complete; ++I) {
1018     if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
1019       Complete = M->isDefined() || M->isDefaulted() ||
1020                  (M->isPureVirtual() && !isa<CXXDestructorDecl>(M));
1021     else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
1022       // If the template function is marked as late template parsed at this
1023       // point, it has not been instantiated and therefore we have not
1024       // performed semantic analysis on it yet, so we cannot know if the type
1025       // can be considered complete.
1026       Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
1027                   F->getTemplatedDecl()->isDefined();
1028     else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
1029       if (R->isInjectedClassName())
1030         continue;
1031       if (R->hasDefinition())
1032         Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
1033                                                    MNCComplete);
1034       else
1035         Complete = false;
1036     }
1037   }
1038   MNCComplete[RD] = Complete;
1039   return Complete;
1040 }
1041 
1042 /// Returns true if the given CXXRecordDecl is fully defined in this
1043 /// translation unit, i.e. all methods are defined or pure virtual and all
1044 /// friends, friend functions and nested classes are fully defined in this
1045 /// translation unit.
1046 ///
1047 /// Should only be called from ActOnEndOfTranslationUnit so that all
1048 /// definitions are actually read.
1049 static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
1050                                  RecordCompleteMap &RecordsComplete,
1051                                  RecordCompleteMap &MNCComplete) {
1052   RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
1053   if (Cache != RecordsComplete.end())
1054     return Cache->second;
1055   bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
1056   for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
1057                                       E = RD->friend_end();
1058        I != E && Complete; ++I) {
1059     // Check if friend classes and methods are complete.
1060     if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
1061       // Friend classes are available as the TypeSourceInfo of the FriendDecl.
1062       if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
1063         Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
1064       else
1065         Complete = false;
1066     } else {
1067       // Friend functions are available through the NamedDecl of FriendDecl.
1068       if (const FunctionDecl *FD =
1069           dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
1070         Complete = FD->isDefined();
1071       else
1072         // This is a template friend, give up.
1073         Complete = false;
1074     }
1075   }
1076   RecordsComplete[RD] = Complete;
1077   return Complete;
1078 }
1079 
1080 void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1081   if (ExternalSource)
1082     ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1083         UnusedLocalTypedefNameCandidates);
1084   for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1085     if (TD->isReferenced())
1086       continue;
1087     Diag(TD->getLocation(), diag::warn_unused_local_typedef)
1088         << isa<TypeAliasDecl>(TD) << TD->getDeclName();
1089   }
1090   UnusedLocalTypedefNameCandidates.clear();
1091 }
1092 
1093 void Sema::ActOnStartOfTranslationUnit() {
1094   if (getLangOpts().CPlusPlusModules &&
1095       getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1096     HandleStartOfHeaderUnit();
1097 }
1098 
1099 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
1100   // No explicit actions are required at the end of the global module fragment.
1101   if (Kind == TUFragmentKind::Global)
1102     return;
1103 
1104   // Transfer late parsed template instantiations over to the pending template
1105   // instantiation list. During normal compilation, the late template parser
1106   // will be installed and instantiating these templates will succeed.
1107   //
1108   // If we are building a TU prefix for serialization, it is also safe to
1109   // transfer these over, even though they are not parsed. The end of the TU
1110   // should be outside of any eager template instantiation scope, so when this
1111   // AST is deserialized, these templates will not be parsed until the end of
1112   // the combined TU.
1113   PendingInstantiations.insert(PendingInstantiations.end(),
1114                                LateParsedInstantiations.begin(),
1115                                LateParsedInstantiations.end());
1116   LateParsedInstantiations.clear();
1117 
1118   // If DefineUsedVTables ends up marking any virtual member functions as used,
1119   // it might lead to more pending template instantiations, which we then need
1120   // to instantiate.
1121   DefineUsedVTables();
1122 
1123   // C++: Perform implicit template instantiations.
1124   //
1125   // FIXME: When we perform these implicit instantiations, we do not
1126   // carefully keep track of the point of instantiation (C++ [temp.point]).
1127   // This means that name lookup that occurs within the template
1128   // instantiation will always happen at the end of the translation unit,
1129   // so it will find some names that are not required to be found. This is
1130   // valid, but we could do better by diagnosing if an instantiation uses a
1131   // name that was not visible at its first point of instantiation.
1132   if (ExternalSource) {
1133     // Load pending instantiations from the external source.
1134     SmallVector<PendingImplicitInstantiation, 4> Pending;
1135     ExternalSource->ReadPendingInstantiations(Pending);
1136     for (auto PII : Pending)
1137       if (auto Func = dyn_cast<FunctionDecl>(PII.first))
1138         Func->setInstantiationIsPending(true);
1139     PendingInstantiations.insert(PendingInstantiations.begin(),
1140                                  Pending.begin(), Pending.end());
1141   }
1142 
1143   {
1144     llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1145     PerformPendingInstantiations();
1146   }
1147 
1148   emitDeferredDiags();
1149 
1150   assert(LateParsedInstantiations.empty() &&
1151          "end of TU template instantiation should not create more "
1152          "late-parsed templates");
1153 
1154   // Report diagnostics for uncorrected delayed typos. Ideally all of them
1155   // should have been corrected by that time, but it is very hard to cover all
1156   // cases in practice.
1157   for (const auto &Typo : DelayedTypos) {
1158     // We pass an empty TypoCorrection to indicate no correction was performed.
1159     Typo.second.DiagHandler(TypoCorrection());
1160   }
1161   DelayedTypos.clear();
1162 }
1163 
1164 void Sema::ActOnEndOfTranslationUnit() {
1165   assert(DelayedDiagnostics.getCurrentPool() == nullptr
1166          && "reached end of translation unit with a pool attached?");
1167 
1168   // If code completion is enabled, don't perform any end-of-translation-unit
1169   // work.
1170   if (PP.isCodeCompletionEnabled())
1171     return;
1172 
1173   // Complete translation units and modules define vtables and perform implicit
1174   // instantiations. PCH files do not.
1175   if (TUKind != TU_Prefix) {
1176     ObjC().DiagnoseUseOfUnimplementedSelectors();
1177 
1178     ActOnEndOfTranslationUnitFragment(
1179         !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1180                                      Module::PrivateModuleFragment
1181             ? TUFragmentKind::Private
1182             : TUFragmentKind::Normal);
1183 
1184     if (LateTemplateParserCleanup)
1185       LateTemplateParserCleanup(OpaqueParser);
1186 
1187     CheckDelayedMemberExceptionSpecs();
1188   } else {
1189     // If we are building a TU prefix for serialization, it is safe to transfer
1190     // these over, even though they are not parsed. The end of the TU should be
1191     // outside of any eager template instantiation scope, so when this AST is
1192     // deserialized, these templates will not be parsed until the end of the
1193     // combined TU.
1194     PendingInstantiations.insert(PendingInstantiations.end(),
1195                                  LateParsedInstantiations.begin(),
1196                                  LateParsedInstantiations.end());
1197     LateParsedInstantiations.clear();
1198 
1199     if (LangOpts.PCHInstantiateTemplates) {
1200       llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1201       PerformPendingInstantiations();
1202     }
1203   }
1204 
1205   DiagnoseUnterminatedPragmaAlignPack();
1206   DiagnoseUnterminatedPragmaAttribute();
1207   OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget();
1208 
1209   // All delayed member exception specs should be checked or we end up accepting
1210   // incompatible declarations.
1211   assert(DelayedOverridingExceptionSpecChecks.empty());
1212   assert(DelayedEquivalentExceptionSpecChecks.empty());
1213 
1214   // All dllexport classes should have been processed already.
1215   assert(DelayedDllExportClasses.empty());
1216   assert(DelayedDllExportMemberFunctions.empty());
1217 
1218   // Remove file scoped decls that turned out to be used.
1219   UnusedFileScopedDecls.erase(
1220       std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
1221                      UnusedFileScopedDecls.end(),
1222                      [this](const DeclaratorDecl *DD) {
1223                        return ShouldRemoveFromUnused(this, DD);
1224                      }),
1225       UnusedFileScopedDecls.end());
1226 
1227   if (TUKind == TU_Prefix) {
1228     // Translation unit prefixes don't need any of the checking below.
1229     if (!PP.isIncrementalProcessingEnabled())
1230       TUScope = nullptr;
1231     return;
1232   }
1233 
1234   // Check for #pragma weak identifiers that were never declared
1235   LoadExternalWeakUndeclaredIdentifiers();
1236   for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
1237     if (WeakIDs.second.empty())
1238       continue;
1239 
1240     Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
1241                                       LookupOrdinaryName);
1242     if (PrevDecl != nullptr &&
1243         !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
1244       for (const auto &WI : WeakIDs.second)
1245         Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
1246             << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
1247     else
1248       for (const auto &WI : WeakIDs.second)
1249         Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
1250             << WeakIDs.first;
1251   }
1252 
1253   if (LangOpts.CPlusPlus11 &&
1254       !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
1255     CheckDelegatingCtorCycles();
1256 
1257   if (!Diags.hasErrorOccurred()) {
1258     if (ExternalSource)
1259       ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
1260     checkUndefinedButUsed(*this);
1261   }
1262 
1263   // A global-module-fragment is only permitted within a module unit.
1264   if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1265                                    Module::ExplicitGlobalModuleFragment) {
1266     Diag(ModuleScopes.back().BeginLoc,
1267          diag::err_module_declaration_missing_after_global_module_introducer);
1268   }
1269 
1270   // Now we can decide whether the modules we're building need an initializer.
1271   if (Module *CurrentModule = getCurrentModule();
1272       CurrentModule && CurrentModule->isInterfaceOrPartition()) {
1273     auto DoesModNeedInit = [this](Module *M) {
1274       if (!getASTContext().getModuleInitializers(M).empty())
1275         return true;
1276       for (auto [Exported, _] : M->Exports)
1277         if (Exported->isNamedModuleInterfaceHasInit())
1278           return true;
1279       for (Module *I : M->Imports)
1280         if (I->isNamedModuleInterfaceHasInit())
1281           return true;
1282 
1283       return false;
1284     };
1285 
1286     CurrentModule->NamedModuleHasInit =
1287         DoesModNeedInit(CurrentModule) ||
1288         llvm::any_of(CurrentModule->submodules(),
1289                      [&](auto *SubM) { return DoesModNeedInit(SubM); });
1290   }
1291 
1292   if (TUKind == TU_ClangModule) {
1293     // If we are building a module, resolve all of the exported declarations
1294     // now.
1295     if (Module *CurrentModule = PP.getCurrentModule()) {
1296       ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
1297 
1298       SmallVector<Module *, 2> Stack;
1299       Stack.push_back(CurrentModule);
1300       while (!Stack.empty()) {
1301         Module *Mod = Stack.pop_back_val();
1302 
1303         // Resolve the exported declarations and conflicts.
1304         // FIXME: Actually complain, once we figure out how to teach the
1305         // diagnostic client to deal with complaints in the module map at this
1306         // point.
1307         ModMap.resolveExports(Mod, /*Complain=*/false);
1308         ModMap.resolveUses(Mod, /*Complain=*/false);
1309         ModMap.resolveConflicts(Mod, /*Complain=*/false);
1310 
1311         // Queue the submodules, so their exports will also be resolved.
1312         auto SubmodulesRange = Mod->submodules();
1313         Stack.append(SubmodulesRange.begin(), SubmodulesRange.end());
1314       }
1315     }
1316 
1317     // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
1318     // modules when they are built, not every time they are used.
1319     emitAndClearUnusedLocalTypedefWarnings();
1320   }
1321 
1322   // C++ standard modules. Diagnose cases where a function is declared inline
1323   // in the module purview but has no definition before the end of the TU or
1324   // the start of a Private Module Fragment (if one is present).
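  // Illustrative example:
  //   export module M;
  //   export inline void f();   // diagnosed below: declared inline in the
  //                              // module purview but never defined in this TU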
1325   if (!PendingInlineFuncDecls.empty()) {
1326     for (auto *D : PendingInlineFuncDecls) {
1327       if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1328         bool DefInPMF = false;
1329         if (auto *FDD = FD->getDefinition()) {
1330           DefInPMF = FDD->getOwningModule()->isPrivateModule();
1331           if (!DefInPMF)
1332             continue;
1333         }
1334         Diag(FD->getLocation(), diag::err_export_inline_not_defined)
1335             << DefInPMF;
1336         // If we have a PMF it should be at the end of the ModuleScopes.
1337         if (DefInPMF &&
1338             ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
1339           Diag(ModuleScopes.back().BeginLoc,
1340                diag::note_private_module_fragment);
1341         }
1342       }
1343     }
1344     PendingInlineFuncDecls.clear();
1345   }
1346 
1347   // C99 6.9.2p2:
1348   //   A declaration of an identifier for an object that has file
1349   //   scope without an initializer, and without a storage-class
1350   //   specifier or with the storage-class specifier static,
1351   //   constitutes a tentative definition. If a translation unit
1352   //   contains one or more tentative definitions for an identifier,
1353   //   and the translation unit contains no external definition for
1354   //   that identifier, then the behavior is exactly as if the
1355   //   translation unit contains a file scope declaration of that
1356   //   identifier, with the composite type as of the end of the
1357   //   translation unit, with an initializer equal to 0.
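  // Illustrative example of the rule above:
  //   int i;        // tentative definition
  //   int a[];      // tentative definition with incomplete array type;
  //                 // completed below to 'int a[1]' (C99 6.9.2p5)
  // At the end of the TU these behave as if written 'int i = 0;' and
  // 'int a[1] = {0};'.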
1358   llvm::SmallSet<VarDecl *, 32> Seen;
1359   for (TentativeDefinitionsType::iterator
1360            T = TentativeDefinitions.begin(ExternalSource.get()),
1361            TEnd = TentativeDefinitions.end();
1362        T != TEnd; ++T) {
1363     VarDecl *VD = (*T)->getActingDefinition();
1364 
1365     // If the tentative definition was completed, getActingDefinition() returns
1366     // null. If we've already seen this variable before, insert()'s second
1367     // return value is false.
1368     if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
1369       continue;
1370 
1371     if (const IncompleteArrayType *ArrayT
1372         = Context.getAsIncompleteArrayType(VD->getType())) {
1373       // Set the length of the array to 1 (C99 6.9.2p5).
1374       Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
1375       llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
1376       QualType T = Context.getConstantArrayType(
1377           ArrayT->getElementType(), One, nullptr, ArraySizeModifier::Normal, 0);
1378       VD->setType(T);
1379     } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
1380                                    diag::err_tentative_def_incomplete_type))
1381       VD->setInvalidDecl();
1382 
1383     // No initialization is performed for a tentative definition.
1384     CheckCompleteVariableDeclaration(VD);
1385 
1386     // Notify the consumer that we've completed a tentative definition.
1387     if (!VD->isInvalidDecl())
1388       Consumer.CompleteTentativeDefinition(VD);
1389   }
1390 
1391   for (auto *D : ExternalDeclarations) {
1392     if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
1393       continue;
1394 
1395     Consumer.CompleteExternalDeclaration(D);
1396   }
1397 
1398   if (LangOpts.HLSL)
1399     HLSL().DiagnoseAvailabilityViolations(
1400         getASTContext().getTranslationUnitDecl());
1401 
1402   // If there were errors, disable 'unused' warnings since they will mostly be
1403   // noise. Don't warn for a use from a module: either we should warn on all
1404   // file-scope declarations in modules or not at all, but whether the
1405   // declaration is used is immaterial.
1406   if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) {
1407     // Output warning for unused file scoped decls.
1408     for (UnusedFileScopedDeclsType::iterator
1409              I = UnusedFileScopedDecls.begin(ExternalSource.get()),
1410              E = UnusedFileScopedDecls.end();
1411          I != E; ++I) {
1412       if (ShouldRemoveFromUnused(this, *I))
1413         continue;
1414 
1415       if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
1416         const FunctionDecl *DiagD;
1417         if (!FD->hasBody(DiagD))
1418           DiagD = FD;
1419         if (DiagD->isDeleted())
1420           continue; // Deleted functions are supposed to be unused.
1421         SourceRange DiagRange = DiagD->getLocation();
1422         if (const ASTTemplateArgumentListInfo *ASTTAL =
1423                 DiagD->getTemplateSpecializationArgsAsWritten())
1424           DiagRange.setEnd(ASTTAL->RAngleLoc);
1425         if (DiagD->isReferenced()) {
1426           if (isa<CXXMethodDecl>(DiagD))
1427             Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
1428                 << DiagD << DiagRange;
1429           else {
1430             if (FD->getStorageClass() == SC_Static &&
1431                 !FD->isInlineSpecified() &&
1432                 !SourceMgr.isInMainFile(
1433                    SourceMgr.getExpansionLoc(FD->getLocation())))
1434               Diag(DiagD->getLocation(),
1435                    diag::warn_unneeded_static_internal_decl)
1436                   << DiagD << DiagRange;
1437             else
1438               Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1439                   << /*function=*/0 << DiagD << DiagRange;
1440           }
1441         } else if (!FD->isTargetMultiVersion() ||
1442                    FD->isTargetMultiVersionDefault()) {
1443           if (FD->getDescribedFunctionTemplate())
1444             Diag(DiagD->getLocation(), diag::warn_unused_template)
1445                 << /*function=*/0 << DiagD << DiagRange;
1446           else
1447             Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
1448                                            ? diag::warn_unused_member_function
1449                                            : diag::warn_unused_function)
1450                 << DiagD << DiagRange;
1451         }
1452       } else {
1453         const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
1454         if (!DiagD)
1455           DiagD = cast<VarDecl>(*I);
1456         SourceRange DiagRange = DiagD->getLocation();
1457         if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
1458           if (const ASTTemplateArgumentListInfo *ASTTAL =
1459                   VTSD->getTemplateArgsAsWritten())
1460             DiagRange.setEnd(ASTTAL->RAngleLoc);
1461         }
1462         if (DiagD->isReferenced()) {
1463           Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1464               << /*variable=*/1 << DiagD << DiagRange;
1465         } else if (DiagD->getDescribedVarTemplate()) {
1466           Diag(DiagD->getLocation(), diag::warn_unused_template)
1467               << /*variable=*/1 << DiagD << DiagRange;
1468         } else if (DiagD->getType().isConstQualified()) {
1469           const SourceManager &SM = SourceMgr;
1470           if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
1471               !PP.getLangOpts().IsHeaderFile)
1472             Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
1473                 << DiagD << DiagRange;
1474         } else {
1475           Diag(DiagD->getLocation(), diag::warn_unused_variable)
1476               << DiagD << DiagRange;
1477         }
1478       }
1479     }
1480 
1481     emitAndClearUnusedLocalTypedefWarnings();
1482   }
1483 
1484   if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
1485     // FIXME: Load additional unused private field candidates from the external
1486     // source.
1487     RecordCompleteMap RecordsComplete;
1488     RecordCompleteMap MNCComplete;
1489     for (const NamedDecl *D : UnusedPrivateFields) {
1490       const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
1491       if (RD && !RD->isUnion() &&
1492           IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
1493         Diag(D->getLocation(), diag::warn_unused_private_field)
1494               << D->getDeclName();
1495       }
1496     }
1497   }
1498 
1499   if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
1500     if (ExternalSource)
1501       ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
1502     for (const auto &DeletedFieldInfo : DeleteExprs) {
1503       for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
1504         AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
1505                                   DeleteExprLoc.second);
1506       }
1507     }
1508   }
1509 
1510   AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl());
1511 
1512   // Check we've noticed that we're no longer parsing the initializer for every
1513   // variable. If we miss cases, then at best we have a performance issue and
1514   // at worst a rejects-valid bug.
1515   assert(ParsingInitForAutoVars.empty() &&
1516          "Didn't unmark var as having its initializer parsed");
1517 
1518   if (!PP.isIncrementalProcessingEnabled())
1519     TUScope = nullptr;
1520 }
1521 
1522 
1523 //===----------------------------------------------------------------------===//
1524 // Helper functions.
1525 //===----------------------------------------------------------------------===//
1526 
1527 DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1528   DeclContext *DC = CurContext;
1529 
1530   while (true) {
1531     if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
1532         isa<RequiresExprBodyDecl>(DC)) {
1533       DC = DC->getParent();
1534     } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
1535                cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
1536                cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
1537       DC = DC->getParent()->getParent();
1538     } else break;
1539   }
1540 
1541   return DC;
1542 }
1543 
1544 /// getCurFunctionDecl - If inside of a function body, this returns a pointer
1545 /// to the function decl for the function being parsed.  If we're currently
1546 /// in a 'block', this returns the containing context.
1547 FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1548   DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1549   return dyn_cast<FunctionDecl>(DC);
1550 }
1551 
1552 ObjCMethodDecl *Sema::getCurMethodDecl() {
1553   DeclContext *DC = getFunctionLevelDeclContext();
1554   while (isa<RecordDecl>(DC))
1555     DC = DC->getParent();
1556   return dyn_cast<ObjCMethodDecl>(DC);
1557 }
1558 
1559 NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1560   DeclContext *DC = getFunctionLevelDeclContext();
1561   if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
1562     return cast<NamedDecl>(DC);
1563   return nullptr;
1564 }
1565 
1566 LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1567   if (getLangOpts().OpenCL)
1568     return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1569   return LangAS::Default;
1570 }
1571 
1572 void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
1573   // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
1574   // and yet we also use the current diag ID on the DiagnosticsEngine. This has
1575   // been made more painfully obvious by the refactor that introduced this
1576   // function, but it is possible that the incoming argument can be
1577   // eliminated. If it truly cannot be (for example, there is some reentrancy
1578   // issue I am not seeing yet), then there should at least be a clarifying
1579   // comment somewhere.
1580   if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
1581     switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
1582               Diags.getCurrentDiagID())) {
1583     case DiagnosticIDs::SFINAE_Report:
1584       // We'll report the diagnostic below.
1585       break;
1586 
1587     case DiagnosticIDs::SFINAE_SubstitutionFailure:
1588       // Count this failure so that we know that template argument deduction
1589       // has failed.
1590       ++NumSFINAEErrors;
1591 
1592       // Make a copy of this suppressed diagnostic and store it with the
1593       // template-deduction information.
1594       if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1595         Diagnostic DiagInfo(&Diags);
1596         (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1597                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1598       }
1599 
1600       Diags.setLastDiagnosticIgnored(true);
1601       Diags.Clear();
1602       return;
1603 
1604     case DiagnosticIDs::SFINAE_AccessControl: {
1605       // Per C++ Core Issue 1170, access control is part of SFINAE.
1606       // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
1607       // make access control a part of SFINAE for the purposes of checking
1608       // type traits.
1609       if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
1610         break;
1611 
1612       SourceLocation Loc = Diags.getCurrentDiagLoc();
1613 
1614       // Suppress this diagnostic.
1615       ++NumSFINAEErrors;
1616 
1617       // Make a copy of this suppressed diagnostic and store it with the
1618       // template-deduction information.
1619       if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1620         Diagnostic DiagInfo(&Diags);
1621         (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1622                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1623       }
1624 
1625       Diags.setLastDiagnosticIgnored(true);
1626       Diags.Clear();
1627 
1628       // Now the diagnostic state is clear, produce a C++98 compatibility
1629       // warning.
1630       Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
1631 
1632       // The last diagnostic which Sema produced was ignored. Suppress any
1633       // notes attached to it.
1634       Diags.setLastDiagnosticIgnored(true);
1635       return;
1636     }
1637 
1638     case DiagnosticIDs::SFINAE_Suppress:
1639       // Make a copy of this suppressed diagnostic and store it with the
1640       // template-deduction information;
1641       if (*Info) {
1642         Diagnostic DiagInfo(&Diags);
1643         (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
1644                        PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1645       }
1646 
1647       // Suppress this diagnostic.
1648       Diags.setLastDiagnosticIgnored(true);
1649       Diags.Clear();
1650       return;
1651     }
1652   }
1653 
1654   // Copy the diagnostic printing policy over the ASTContext printing policy.
1655   // TODO: Stop doing that.  See: https://reviews.llvm.org/D45093#1090292
1656   Context.setPrintingPolicy(getPrintingPolicy());
1657 
1658   // Emit the diagnostic.
1659   if (!Diags.EmitCurrentDiagnostic())
1660     return;
1661 
1662   // If this is not a note, and we're in a template instantiation
1663   // that is different from the last template instantiation where
1664   // we emitted an error, print a template instantiation
1665   // backtrace.
1666   if (!DiagnosticIDs::isBuiltinNote(DiagID))
1667     PrintContextStack();
1668 }
1669 
1670 bool Sema::hasUncompilableErrorOccurred() const {
1671   if (getDiagnostics().hasUncompilableErrorOccurred())
1672     return true;
1673   auto *FD = dyn_cast<FunctionDecl>(CurContext);
1674   if (!FD)
1675     return false;
1676   auto Loc = DeviceDeferredDiags.find(FD);
1677   if (Loc == DeviceDeferredDiags.end())
1678     return false;
1679   for (auto PDAt : Loc->second) {
1680     if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
1681       return true;
1682   }
1683   return false;
1684 }
1685 
1686 // Print notes showing how we can reach FD starting from an a priori
1687 // known-emitted function.
1688 static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1689   auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(FD);
1690   while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) {
1691     // Respect error limit.
1692     if (S.Diags.hasFatalErrorOccurred())
1693       return;
1694     DiagnosticBuilder Builder(
1695         S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1696     Builder << FnIt->second.FD;
1697     FnIt = S.CUDA().DeviceKnownEmittedFns.find(FnIt->second.FD);
1698   }
1699 }
1700 
1701 namespace {
1702 
1703 /// Helper class that emits deferred diagnostic messages once an entity that
1704 /// directly or indirectly uses the function containing those diagnostics is
1705 /// known to be emitted.
1706 ///
1707 /// While parsing the AST, certain diagnostic messages are recorded as deferred
1708 /// diagnostics because it is not yet known whether the functions containing
1709 /// them will be emitted. A list of potentially emitted functions, and of
1710 /// variables whose initializers may trigger emission of functions, is also
1711 /// recorded. DeferredDiagnosticsEmitter recursively visits the functions used
1712 /// by each such function to emit their deferred diagnostics.
1713 ///
1714 /// During the visit, certain OpenMP directives, or initializers of variables
1715 /// with certain OpenMP attributes, cause the subsequent visiting of any
1716 /// functions to enter a state called the OpenMP device context in this
1717 /// implementation. The state is exited when the directive or initializer is
1718 /// exited. This state can change the emission state of subsequent uses
1719 /// of functions.
1720 ///
1721 /// Conceptually the functions or variables to be visited form a use graph
1722 /// where the parent node uses the child node. At any point of the visit,
1723 /// the tree nodes traversed from the tree root to the current node form a use
1724 /// stack. The emission state of the current node depends on two factors:
1725 ///    1. the emission state of the root node
1726 ///    2. whether the current node is in OpenMP device context
1727 /// If the function is determined to be emitted, the deferred diagnostics it
1728 /// contains are emitted, together with information about the use stack.
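///
/// Illustrative example (assuming OpenMP target offloading):
///   #pragma omp declare target
///   int X = init();            // init() is visited in OpenMP device context,
///                              // so its deferred diagnostics may be emitted
///   #pragma omp end declare target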
1729 ///
1730 class DeferredDiagnosticsEmitter
1731     : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1732 public:
1733   typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1734 
1735   // Whether the function is already in the current use-path.
1736   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1737 
1738   // The current use-path.
1739   llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1740 
1741   // Whether the visiting of the function has been done. Done[0] is for the
1742   // case not in OpenMP device context. Done[1] is for the case in OpenMP
1743   // device context. We need two sets because diagnostics emission may be
1744   // different depending on whether it is in OpenMP device context.
1745   llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1746 
1747   // Emission state of the root node of the current use graph.
1748   bool ShouldEmitRootNode;
1749 
1750   // Current OpenMP device context level. It is initialized to 0 and each
1751   // entering of device context increases it by 1 and each exit decreases
1752   // it by 1. Non-zero value indicates it is currently in device context.
1753   unsigned InOMPDeviceContext;
1754 
1755   DeferredDiagnosticsEmitter(Sema &S)
1756       : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
1757 
1758   bool shouldVisitDiscardedStmt() const { return false; }
1759 
1760   void VisitOMPTargetDirective(OMPTargetDirective *Node) {
1761     ++InOMPDeviceContext;
1762     Inherited::VisitOMPTargetDirective(Node);
1763     --InOMPDeviceContext;
1764   }
1765 
1766   void visitUsedDecl(SourceLocation Loc, Decl *D) {
1767     if (isa<VarDecl>(D))
1768       return;
1769     if (auto *FD = dyn_cast<FunctionDecl>(D))
1770       checkFunc(Loc, FD);
1771     else
1772       Inherited::visitUsedDecl(Loc, D);
1773   }
1774 
1775   void checkVar(VarDecl *VD) {
1776     assert(VD->isFileVarDecl() &&
1777            "Should only check file-scope variables");
1778     if (auto *Init = VD->getInit()) {
1779       auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
1780       bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
1781                              *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
1782       if (IsDev)
1783         ++InOMPDeviceContext;
1784       this->Visit(Init);
1785       if (IsDev)
1786         --InOMPDeviceContext;
1787     }
1788   }
1789 
1790   void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
1791     auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
1792     FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
1793     if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
1794         S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
1795       return;
1796     // Finalize analysis of OpenMP-specific constructs.
1797     if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
1798         (ShouldEmitRootNode || InOMPDeviceContext))
1799       S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
1800     if (Caller)
1801       S.CUDA().DeviceKnownEmittedFns[FD] = {Caller, Loc};
1802     // Always emit deferred diagnostics for the direct users. This does not
1803     // lead to an explosion of diagnostics since each user is visited at most
1804     // twice.
1805     if (ShouldEmitRootNode || InOMPDeviceContext)
1806       emitDeferredDiags(FD, Caller);
1807     // Do not revisit a function if the function body has been completely
1808     // visited before.
1809     if (!Done.insert(FD).second)
1810       return;
1811     InUsePath.insert(FD);
1812     UsePath.push_back(FD);
1813     if (auto *S = FD->getBody()) {
1814       this->Visit(S);
1815     }
1816     UsePath.pop_back();
1817     InUsePath.erase(FD);
1818   }
1819 
1820   void checkRecordedDecl(Decl *D) {
1821     if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1822       ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
1823                            Sema::FunctionEmissionStatus::Emitted;
1824       checkFunc(SourceLocation(), FD);
1825     } else
1826       checkVar(cast<VarDecl>(D));
1827   }
1828 
1829   // Emit any deferred diagnostics for FD
1830   void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
1831     auto It = S.DeviceDeferredDiags.find(FD);
1832     if (It == S.DeviceDeferredDiags.end())
1833       return;
1834     bool HasWarningOrError = false;
1835     bool FirstDiag = true;
1836     for (PartialDiagnosticAt &PDAt : It->second) {
1837       // Respect error limit.
1838       if (S.Diags.hasFatalErrorOccurred())
1839         return;
1840       const SourceLocation &Loc = PDAt.first;
1841       const PartialDiagnostic &PD = PDAt.second;
1842       HasWarningOrError |=
1843           S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
1844           DiagnosticsEngine::Warning;
1845       {
1846         DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
1847         PD.Emit(Builder);
1848       }
1849       // Emit the note on the first diagnostic in case too many diagnostics
1850       // cause the note not to be emitted.
1851       if (FirstDiag && HasWarningOrError && ShowCallStack) {
1852         emitCallStackNotes(S, FD);
1853         FirstDiag = false;
1854       }
1855     }
1856   }
1857 };
1858 } // namespace
1859 
1860 void Sema::emitDeferredDiags() {
1861   if (ExternalSource)
1862     ExternalSource->ReadDeclsToCheckForDeferredDiags(
1863         DeclsToCheckForDeferredDiags);
1864 
1865   if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
1866       DeclsToCheckForDeferredDiags.empty())
1867     return;
1868 
1869   DeferredDiagnosticsEmitter DDE(*this);
1870   for (auto *D : DeclsToCheckForDeferredDiags)
1871     DDE.checkRecordedDecl(D);
1872 }
1873 
1874 // In CUDA, there are some constructs which may appear in semantically-valid
1875 // code, but trigger errors if we ever generate code for the function in which
1876 // they appear.  Essentially every construct you're not allowed to use on the
1877 // device falls into this category, because you are allowed to use these
1878 // constructs in a __host__ __device__ function, but only if that function is
1879 // never codegen'ed on the device.
1880 //
1881 // To handle semantic checking for these constructs, we keep track of the set of
1882 // functions we know will be emitted, either because we could tell a priori that
1883 // they would be emitted, or because they were transitively called by a
1884 // known-emitted function.
1885 //
1886 // We also keep a partial call graph of which not-known-emitted functions call
1887 // which other not-known-emitted functions.
1888 //
1889 // When we see something which is illegal if the current function is emitted
1890 // (usually by way of DiagIfDeviceCode, DiagIfHostCode, or
1891 // CheckCall), we first check if the current function is known-emitted.  If
1892 // so, we immediately output the diagnostic.
1893 //
1894 // Otherwise, we "defer" the diagnostic.  It sits in Sema::DeviceDeferredDiags
1895 // until we discover that the function is known-emitted, at which point we take
1896 // it out of this map and emit the diagnostic.
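//
// Illustrative example (CUDA; not taken from a test):
//   __host__ __device__ void hd() { throw 0; }   // 'throw' is not allowed in
//                                                 // device code, so a deferred
//                                                 // diagnostic is recorded
//   __global__ void kernel() { hd(); }            // kernel() is known-emitted for
//                                                 // the device, which makes hd()
//                                                 // known-emitted and triggers the
//                                                 // deferred diagnostic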
1897 
1898 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
1899                                                    unsigned DiagID,
1900                                                    const FunctionDecl *Fn,
1901                                                    Sema &S)
1902     : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
1903       ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
1904   switch (K) {
1905   case K_Nop:
1906     break;
1907   case K_Immediate:
1908   case K_ImmediateWithCallStack:
1909     ImmediateDiag.emplace(
1910         ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
1911     break;
1912   case K_Deferred:
1913     assert(Fn && "Must have a function to attach the deferred diag to.");
1914     auto &Diags = S.DeviceDeferredDiags[Fn];
1915     PartialDiagId.emplace(Diags.size());
1916     Diags.emplace_back(Loc, S.PDiag(DiagID));
1917     break;
1918   }
1919 }
1920 
1921 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
1922     : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
1923       ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
1924       PartialDiagId(D.PartialDiagId) {
1925   // Clean the previous diagnostics.
1926   D.ShowCallStack = false;
1927   D.ImmediateDiag.reset();
1928   D.PartialDiagId.reset();
1929 }
1930 
1931 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
1932   if (ImmediateDiag) {
1933     // Emit our diagnostic and, if it was a warning or error, output a callstack
1934     // if Fn isn't a priori known-emitted.
1935     bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
1936                                 DiagID, Loc) >= DiagnosticsEngine::Warning;
1937     ImmediateDiag.reset(); // Emit the immediate diag.
1938     if (IsWarningOrError && ShowCallStack)
1939       emitCallStackNotes(S, Fn);
1940   } else {
1941     assert((!PartialDiagId || ShowCallStack) &&
1942            "Must always show call stack for deferred diags.");
1943   }
1944 }
1945 
1946 Sema::SemaDiagnosticBuilder
1947 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
1948   FD = FD ? FD : getCurFunctionDecl();
1949   if (LangOpts.OpenMP)
1950     return LangOpts.OpenMPIsTargetDevice
1951                ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1952                : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD);
1953   if (getLangOpts().CUDA)
1954     return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID)
1955                                       : CUDA().DiagIfHostCode(Loc, DiagID);
1956 
1957   if (getLangOpts().SYCLIsDevice)
1958     return SYCL().DiagIfDeviceCode(Loc, DiagID);
1959 
1960   return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1961                                FD, *this);
1962 }
1963 
1964 void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
1965   if (isUnevaluatedContext() || Ty.isNull())
1966     return;
1967 
1968   // The original idea behind the checkTypeSupport function is that unused
1969   // declarations can be replaced with an array of bytes of the same size during
1970   // codegen, but such replacement doesn't seem to be possible for types without
1971   // a constant byte size, like zero-length arrays. So, do a deep check for SYCL.
1972   if (D && LangOpts.SYCLIsDevice) {
1973     llvm::DenseSet<QualType> Visited;
1974     SYCL().deepTypeCheckForDevice(Loc, Visited, D);
1975   }
1976 
1977   Decl *C = cast<Decl>(getCurLexicalContext());
1978 
1979   // Memcpy operations for structs containing a member with unsupported type
1980   // are ok, though.
1981   if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
1982     if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
1983         MD->isTrivial())
1984       return;
1985 
1986     if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
1987       if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
1988         return;
1989   }
1990 
1991   // Try to associate errors with the lexical context, if that is a function, or
1992   // the value declaration otherwise.
1993   const FunctionDecl *FD = isa<FunctionDecl>(C)
1994                                ? cast<FunctionDecl>(C)
1995                                : dyn_cast_or_null<FunctionDecl>(D);
1996 
1997   auto CheckDeviceType = [&](QualType Ty) {
1998     if (Ty->isDependentType())
1999       return;
2000 
2001     if (Ty->isBitIntType()) {
2002       if (!Context.getTargetInfo().hasBitIntType()) {
2003         PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2004         if (D)
2005           PD << D;
2006         else
2007           PD << "expression";
2008         targetDiag(Loc, PD, FD)
2009             << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
2010             << Ty << Context.getTargetInfo().getTriple().str();
2011       }
2012       return;
2013     }
2014 
2015     // Check whether a 128-bit floating type (e.g. the host's 'long double') uses
2016     // semantics the target does not support (IEEE quad vs. PPC double-double).
2017     bool LongDoubleMismatched = false;
2018     if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
2019       const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
2020       if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
2021            !Context.getTargetInfo().hasFloat128Type()) ||
2022           (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
2023            !Context.getTargetInfo().hasIbm128Type()))
2024         LongDoubleMismatched = true;
2025     }
2026 
2027     if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
2028         (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
2029         (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
2030         (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
2031          !Context.getTargetInfo().hasInt128Type()) ||
2032         (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
2033          !LangOpts.CUDAIsDevice) ||
2034         LongDoubleMismatched) {
2035       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2036       if (D)
2037         PD << D;
2038       else
2039         PD << "expression";
2040 
2041       if (targetDiag(Loc, PD, FD)
2042           << true /*show bit size*/
2043           << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
2044           << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
2045         if (D)
2046           D->setInvalidDecl();
2047       }
2048       if (D)
2049         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2050     }
2051   };
2052 
2053   auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
2054     if (LangOpts.SYCLIsDevice ||
2055         (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
2056         LangOpts.CUDAIsDevice)
2057       CheckDeviceType(Ty);
2058 
2059     QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
2060     const TargetInfo &TI = Context.getTargetInfo();
2061     if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
2062       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2063       if (D)
2064         PD << D;
2065       else
2066         PD << "expression";
2067 
2068       if (Diag(Loc, PD, FD)
2069           << false /*show bit size*/ << 0 << Ty << false /*return*/
2070           << TI.getTriple().str()) {
2071         if (D)
2072           D->setInvalidDecl();
2073       }
2074       if (D)
2075         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2076     }
2077 
2078     bool IsDouble = UnqualTy == Context.DoubleTy;
2079     bool IsFloat = UnqualTy == Context.FloatTy;
2080     if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
2081       PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
2082       if (D)
2083         PD << D;
2084       else
2085         PD << "expression";
2086 
2087       if (Diag(Loc, PD, FD)
2088           << false /*show bit size*/ << 0 << Ty << true /*return*/
2089           << TI.getTriple().str()) {
2090         if (D)
2091           D->setInvalidDecl();
2092       }
2093       if (D)
2094         targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
2095     }
2096 
2097     if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
2098       llvm::StringMap<bool> CallerFeatureMap;
2099       Context.getFunctionFeatureMap(CallerFeatureMap, FD);
2100       RISCV().checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
2101     }
2102 
2103     // Don't allow SVE types in functions without a SVE target.
2104     if (Ty->isSVESizelessBuiltinType() && FD) {
2105       llvm::StringMap<bool> CallerFeatureMap;
2106       Context.getFunctionFeatureMap(CallerFeatureMap, FD);
2107       if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap)) {
2108         if (!Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
2109           Diag(Loc, diag::err_sve_vector_in_non_sve_target) << Ty;
2110         else if (!IsArmStreamingFunction(FD,
2111                                          /*IncludeLocallyStreaming=*/true)) {
2112           Diag(Loc, diag::err_sve_vector_in_non_streaming_function) << Ty;
2113         }
2114       }
2115     }
2116   };
2117 
2118   CheckType(Ty);
2119   if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
2120     for (const auto &ParamTy : FPTy->param_types())
2121       CheckType(ParamTy);
2122     CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
2123   }
2124   if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
2125     CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
2126 }
2127 
2128 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2129   SourceLocation loc = locref;
2130   if (!loc.isMacroID()) return false;
2131 
2132   // There's no good way right now to look at the intermediate
2133   // expansions, so just jump to the expansion location.
2134   loc = getSourceManager().getExpansionLoc(loc);
2135 
2136   // If that's written with the name, stop here.
2137   SmallString<16> buffer;
2138   if (getPreprocessor().getSpelling(loc, buffer) == name) {
2139     locref = loc;
2140     return true;
2141   }
2142   return false;
2143 }
2144 
2145 Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2146 
2147   if (!Ctx)
2148     return nullptr;
2149 
2150   Ctx = Ctx->getPrimaryContext();
2151   for (Scope *S = getCurScope(); S; S = S->getParent()) {
2152     // Ignore scopes that cannot have declarations. This is important for
2153     // out-of-line definitions of static class members.
2154     if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2155       if (DeclContext *Entity = S->getEntity())
2156         if (Ctx == Entity->getPrimaryContext())
2157           return S;
2158   }
2159 
2160   return nullptr;
2161 }
2162 
2163 /// Enter a new function scope
2164 void Sema::PushFunctionScope() {
2165   if (FunctionScopes.empty() && CachedFunctionScope) {
2166     // Use CachedFunctionScope to avoid allocating memory when possible.
2167     CachedFunctionScope->Clear();
2168     FunctionScopes.push_back(CachedFunctionScope.release());
2169   } else {
2170     FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
2171   }
2172   if (LangOpts.OpenMP)
2173     OpenMP().pushOpenMPFunctionRegion();
2174 }
2175 
2176 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2177   FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2178                                               BlockScope, Block));
2179   CapturingFunctionScopes++;
2180 }
2181 
2182 LambdaScopeInfo *Sema::PushLambdaScope() {
2183   LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2184   FunctionScopes.push_back(LSI);
2185   CapturingFunctionScopes++;
2186   return LSI;
2187 }
2188 
2189 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2190   if (LambdaScopeInfo *const LSI = getCurLambda()) {
2191     LSI->AutoTemplateParameterDepth = Depth;
2192     return;
2193   }
2194   llvm_unreachable(
2195       "Remove assertion if intentionally called in a non-lambda context.");
2196 }
2197 
2198 // Check that the type of the VarDecl has an accessible copy constructor and
2199 // resolve its destructor's exception specification.
2200 // This also performs initialization of block variables when they are moved
2201 // to the heap. It uses the same rules as applicable for implicit moves
2202 // according to the C++ standard in effect ([class.copy.elision]p3).
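// Illustrative example (blocks; names are made up):
//   __block Widget W;                 // Widget is a C++ class type
//   dispatch_async(q, ^{ use(W); });  // the block escapes, so W is marked
//                                     // escaping and the copy/move initializer
//                                     // used to move W to the heap is built here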
2203 static void checkEscapingByref(VarDecl *VD, Sema &S) {
2204   QualType T = VD->getType();
2205   EnterExpressionEvaluationContext scope(
2206       S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
2207   SourceLocation Loc = VD->getLocation();
2208   Expr *VarRef =
2209       new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
2210   ExprResult Result;
2211   auto IE = InitializedEntity::InitializeBlock(Loc, T);
2212   if (S.getLangOpts().CPlusPlus23) {
2213     auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
2214                                        VK_XValue, FPOptionsOverride());
2215     Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
2216   } else {
2217     Result = S.PerformMoveOrCopyInitialization(
2218         IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
2219         VarRef);
2220   }
2221 
2222   if (!Result.isInvalid()) {
2223     Result = S.MaybeCreateExprWithCleanups(Result);
2224     Expr *Init = Result.getAs<Expr>();
2225     S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
2226   }
2227 
2228   // The destructor's exception specification is needed when IRGen generates
2229   // block copy/destroy functions. Resolve it here.
2230   if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
2231     if (CXXDestructorDecl *DD = RD->getDestructor()) {
2232       auto *FPT = DD->getType()->castAs<FunctionProtoType>();
2233       S.ResolveExceptionSpec(Loc, FPT);
2234     }
2235 }
2236 
2237 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2238   // Set the EscapingByref flag of __block variables captured by
2239   // escaping blocks.
2240   for (const BlockDecl *BD : FSI.Blocks) {
2241     for (const BlockDecl::Capture &BC : BD->captures()) {
2242       VarDecl *VD = BC.getVariable();
2243       if (VD->hasAttr<BlocksAttr>()) {
2244         // Nothing to do if this is a __block variable captured by a
2245         // non-escaping block.
2246         if (BD->doesNotEscape())
2247           continue;
2248         VD->setEscapingByref();
2249       }
2250       // Check whether the captured variable is or contains an object of
2251       // non-trivial C union type.
2252       QualType CapType = BC.getVariable()->getType();
2253       if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2254           CapType.hasNonTrivialToPrimitiveCopyCUnion())
2255         S.checkNonTrivialCUnion(BC.getVariable()->getType(),
2256                                 BD->getCaretLocation(),
2257                                 Sema::NTCUC_BlockCapture,
2258                                 Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
2259     }
2260   }
2261 
2262   for (VarDecl *VD : FSI.ByrefBlockVars) {
2263     // __block variables might require us to capture a copy-initializer.
2264     if (!VD->isEscapingByref())
2265       continue;
2266     // It's currently invalid to ever have a __block variable with an
2267     // array type; should we diagnose that here?
2268     // Regardless, we don't want to ignore array nesting when
2269     // constructing this copy.
2270     if (VD->getType()->isStructureOrClassType())
2271       checkEscapingByref(VD, S);
2272   }
2273 }
2274 
2275 Sema::PoppedFunctionScopePtr
2276 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
2277                            const Decl *D, QualType BlockType) {
2278   assert(!FunctionScopes.empty() && "mismatched push/pop!");
2279 
2280   markEscapingByrefs(*FunctionScopes.back(), *this);
2281 
2282   PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
2283                                PoppedFunctionScopeDeleter(this));
2284 
2285   if (LangOpts.OpenMP)
2286     OpenMP().popOpenMPFunctionRegion(Scope.get());
2287 
2288   // Issue any analysis-based warnings.
2289   if (WP && D)
2290     AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
2291   else
2292     for (const auto &PUD : Scope->PossiblyUnreachableDiags)
2293       Diag(PUD.Loc, PUD.PD);
2294 
2295   return Scope;
2296 }
2297 
2298 void Sema::PoppedFunctionScopeDeleter::
2299 operator()(sema::FunctionScopeInfo *Scope) const {
2300   if (!Scope->isPlainFunction())
2301     Self->CapturingFunctionScopes--;
2302   // Stash the function scope for later reuse if it's for a normal function.
2303   if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2304     Self->CachedFunctionScope.reset(Scope);
2305   else
2306     delete Scope;
2307 }
2308 
2309 void Sema::PushCompoundScope(bool IsStmtExpr) {
2310   getCurFunction()->CompoundScopes.push_back(
2311       CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2312 }
2313 
2314 void Sema::PopCompoundScope() {
2315   FunctionScopeInfo *CurFunction = getCurFunction();
2316   assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2317 
2318   CurFunction->CompoundScopes.pop_back();
2319 }
2320 
2321 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2322   return getCurFunction()->hasUnrecoverableErrorOccurred();
2323 }
2324 
2325 void Sema::setFunctionHasBranchIntoScope() {
2326   if (!FunctionScopes.empty())
2327     FunctionScopes.back()->setHasBranchIntoScope();
2328 }
2329 
2330 void Sema::setFunctionHasBranchProtectedScope() {
2331   if (!FunctionScopes.empty())
2332     FunctionScopes.back()->setHasBranchProtectedScope();
2333 }
2334 
2335 void Sema::setFunctionHasIndirectGoto() {
2336   if (!FunctionScopes.empty())
2337     FunctionScopes.back()->setHasIndirectGoto();
2338 }
2339 
2340 void Sema::setFunctionHasMustTail() {
2341   if (!FunctionScopes.empty())
2342     FunctionScopes.back()->setHasMustTail();
2343 }
2344 
2345 BlockScopeInfo *Sema::getCurBlock() {
2346   if (FunctionScopes.empty())
2347     return nullptr;
2348 
2349   auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
2350   if (CurBSI && CurBSI->TheDecl &&
2351       !CurBSI->TheDecl->Encloses(CurContext)) {
2352     // We have switched contexts due to template instantiation.
2353     assert(!CodeSynthesisContexts.empty());
2354     return nullptr;
2355   }
2356 
2357   return CurBSI;
2358 }
2359 
2360 FunctionScopeInfo *Sema::getEnclosingFunction() const {
2361   if (FunctionScopes.empty())
2362     return nullptr;
2363 
2364   for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2365     if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
2366       continue;
2367     return FunctionScopes[e];
2368   }
2369   return nullptr;
2370 }
2371 
2372 LambdaScopeInfo *Sema::getEnclosingLambda() const {
2373   for (auto *Scope : llvm::reverse(FunctionScopes)) {
2374     if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
2375       if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
2376           LSI->AfterParameterList) {
2377         // We have switched contexts due to template instantiation.
2378         // FIXME: We should swap out the FunctionScopes during code synthesis
2379         // so that we don't need to check for this.
2380         assert(!CodeSynthesisContexts.empty());
2381         return nullptr;
2382       }
2383       return LSI;
2384     }
2385   }
2386   return nullptr;
2387 }
2388 
2389 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
2390   if (FunctionScopes.empty())
2391     return nullptr;
2392 
2393   auto I = FunctionScopes.rbegin();
2394   if (IgnoreNonLambdaCapturingScope) {
2395     auto E = FunctionScopes.rend();
2396     while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
2397       ++I;
2398     if (I == E)
2399       return nullptr;
2400   }
2401   auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
2402   if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
2403       !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
2404     // We have switched contexts due to template instantiation.
2405     assert(!CodeSynthesisContexts.empty());
2406     return nullptr;
2407   }
2408 
2409   return CurLSI;
2410 }
2411 
2412 // We have a generic lambda if we parsed auto parameters, or we have
2413 // an associated template parameter list.
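// Illustrative example: '[](auto x) { return x; }' and
// '[]<typename T>(T x) { return x; }' are both generic lambdas.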
2414 LambdaScopeInfo *Sema::getCurGenericLambda() {
2415   if (LambdaScopeInfo *LSI =  getCurLambda()) {
2416     return (LSI->TemplateParams.size() ||
2417                     LSI->GLTemplateParameterList) ? LSI : nullptr;
2418   }
2419   return nullptr;
2420 }
2421 
2422 
2423 void Sema::ActOnComment(SourceRange Comment) {
2424   if (!LangOpts.RetainCommentsFromSystemHeaders &&
2425       SourceMgr.isInSystemHeader(Comment.getBegin()))
2426     return;
2427   RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
2428   if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
2429     SourceRange MagicMarkerRange(Comment.getBegin(),
2430                                  Comment.getBegin().getLocWithOffset(3));
2431     StringRef MagicMarkerText;
2432     switch (RC.getKind()) {
2433     case RawComment::RCK_OrdinaryBCPL:
2434       MagicMarkerText = "///<";
2435       break;
2436     case RawComment::RCK_OrdinaryC:
2437       MagicMarkerText = "/**<";
2438       break;
2439     case RawComment::RCK_Invalid:
2440       // FIXME: are there other scenarios that could produce an invalid
2441       // raw comment here?
2442       Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
2443       return;
2444     default:
2445       llvm_unreachable("if this is an almost Doxygen comment, "
2446                        "it should be ordinary");
2447     }
2448     Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
2449       FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
2450   }
2451   Context.addComment(RC);
2452 }
2453 
2454 // Pin this vtable to this file.
2455 ExternalSemaSource::~ExternalSemaSource() {}
2456 char ExternalSemaSource::ID;
2457 
2458 void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
2459 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }
2460 
2461 void ExternalSemaSource::ReadKnownNamespaces(
2462                            SmallVectorImpl<NamespaceDecl *> &Namespaces) {
2463 }
2464 
2465 void ExternalSemaSource::ReadUndefinedButUsed(
2466     llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}
2467 
2468 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
2469     FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2470 
2471 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
2472                          UnresolvedSetImpl &OverloadSet) {
2473   ZeroArgCallReturnTy = QualType();
2474   OverloadSet.clear();
2475 
2476   const OverloadExpr *Overloads = nullptr;
2477   bool IsMemExpr = false;
2478   if (E.getType() == Context.OverloadTy) {
2479     OverloadExpr::FindResult FR = OverloadExpr::find(&E);
2480 
2481     // Ignore overloads that are pointer-to-member constants.
2482     if (FR.HasFormOfMemberPointer)
2483       return false;
2484 
2485     Overloads = FR.Expression;
2486   } else if (E.getType() == Context.BoundMemberTy) {
2487     Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
2488     IsMemExpr = true;
2489   }
2490 
2491   bool Ambiguous = false;
2492   bool IsMV = false;
2493 
2494   if (Overloads) {
2495     for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
2496          DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
2497       OverloadSet.addDecl(*it);
2498 
2499       // Check whether the function is a non-template, non-member which takes no
2500       // arguments.
2501       if (IsMemExpr)
2502         continue;
2503       if (const FunctionDecl *OverloadDecl
2504             = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
2505         if (OverloadDecl->getMinRequiredArguments() == 0) {
2506           if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
2507               (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
2508                           OverloadDecl->isCPUSpecificMultiVersion()))) {
2509             ZeroArgCallReturnTy = QualType();
2510             Ambiguous = true;
2511           } else {
2512             ZeroArgCallReturnTy = OverloadDecl->getReturnType();
2513             IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
2514                    OverloadDecl->isCPUSpecificMultiVersion();
2515           }
2516         }
2517       }
2518     }
2519 
2520     // If it's not a member, use better machinery to try to resolve the call
2521     if (!IsMemExpr)
2522       return !ZeroArgCallReturnTy.isNull();
2523   }
2524 
2525   // Attempt to call the member with no arguments - this will correctly handle
2526   // member templates with defaults/deduction of template arguments, overloads
2527   // with default arguments, etc.
2528   if (IsMemExpr && !E.isTypeDependent()) {
2529     Sema::TentativeAnalysisScope Trap(*this);
2530     ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
2531                                              std::nullopt, SourceLocation());
2532     if (R.isUsable()) {
2533       ZeroArgCallReturnTy = R.get()->getType();
2534       return true;
2535     }
2536     return false;
2537   }
2538 
2539   if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
2540     if (const auto *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
2541       if (Fun->getMinRequiredArguments() == 0)
2542         ZeroArgCallReturnTy = Fun->getReturnType();
2543       return true;
2544     }
2545   }
2546 
2547   // We don't have an expression that's convenient to get a FunctionDecl from,
2548   // but we can at least check if the type is "function of 0 arguments".
2549   QualType ExprTy = E.getType();
2550   const FunctionType *FunTy = nullptr;
2551   QualType PointeeTy = ExprTy->getPointeeType();
2552   if (!PointeeTy.isNull())
2553     FunTy = PointeeTy->getAs<FunctionType>();
2554   if (!FunTy)
2555     FunTy = ExprTy->getAs<FunctionType>();
2556 
2557   if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(FunTy)) {
2558     if (FPT->getNumParams() == 0)
2559       ZeroArgCallReturnTy = FunTy->getReturnType();
2560     return true;
2561   }
2562   return false;
2563 }
2564 
2565 /// Give notes for a set of overloads.
2566 ///
2567 /// A companion to tryExprAsCall. In cases when the name that the programmer
2568 /// wrote was an overloaded function, we may be able to make some guesses about
2569 /// plausible overloads based on their return types; such guesses can be handed
2570 /// off to this method to be emitted as notes.
2571 ///
2572 /// \param Overloads - The overloads to note.
2573 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2574 ///  -fshow-overloads=best, this is the location to attach to the note about too
2575 ///  many candidates. Typically this will be the location of the original
2576 ///  ill-formed expression.
2577 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2578                           const SourceLocation FinalNoteLoc) {
2579   unsigned ShownOverloads = 0;
2580   unsigned SuppressedOverloads = 0;
2581   for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2582        DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2583     if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2584       ++SuppressedOverloads;
2585       continue;
2586     }
2587 
2588     const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2589     // Don't print overloads for non-default multiversioned functions.
2590     if (const auto *FD = Fn->getAsFunction()) {
2591       if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2592           !FD->getAttr<TargetAttr>()->isDefaultVersion())
2593         continue;
2594       if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2595           !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2596         continue;
2597     }
2598     S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2599     ++ShownOverloads;
2600   }
2601 
2602   S.Diags.overloadCandidatesShown(ShownOverloads);
2603 
2604   if (SuppressedOverloads)
2605     S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2606       << SuppressedOverloads;
2607 }
2608 
2609 static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2610                                    const UnresolvedSetImpl &Overloads,
2611                                    bool (*IsPlausibleResult)(QualType)) {
2612   if (!IsPlausibleResult)
2613     return noteOverloads(S, Overloads, Loc);
2614 
2615   UnresolvedSet<2> PlausibleOverloads;
2616   for (OverloadExpr::decls_iterator It = Overloads.begin(),
2617          DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2618     const auto *OverloadDecl = cast<FunctionDecl>(*It);
2619     QualType OverloadResultTy = OverloadDecl->getReturnType();
2620     if (IsPlausibleResult(OverloadResultTy))
2621       PlausibleOverloads.addDecl(It.getDecl());
2622   }
2623   noteOverloads(S, PlausibleOverloads, Loc);
2624 }
2625 
2626 /// Determine whether the given expression can be called by just
2627 /// putting parentheses after it.  Notably, expressions with unary
2628 /// operators can't be, because the unary operator will start parsing
2629 /// outside the call.
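/// For example (illustrative, with 'fp' a hypothetical function pointer):
/// appending "()" to the expression '*fp' yields '*fp()', which parses as
/// '*(fp())' rather than '(*fp)()', so a trailing-parentheses fix-it would
/// not form the intended call.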
2630 static bool IsCallableWithAppend(const Expr *E) {
2631   E = E->IgnoreImplicit();
2632   return (!isa<CStyleCastExpr>(E) &&
2633           !isa<UnaryOperator>(E) &&
2634           !isa<BinaryOperator>(E) &&
2635           !isa<CXXOperatorCallExpr>(E));
2636 }
2637 
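/// Return true if \p E (after looking through a leading unary operator) is an
/// unresolved lookup whose first candidate is a function declared with the
/// cpu_dispatch or cpu_specific multiversioning attributes.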
2638 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2639   if (const auto *UO = dyn_cast<UnaryOperator>(E))
2640     E = UO->getSubExpr();
2641 
2642   if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
2643     if (ULE->getNumDecls() == 0)
2644       return false;
2645 
2646     const NamedDecl *ND = *ULE->decls_begin();
2647     if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2648       return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2649   }
2650   return false;
2651 }
2652 
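// Illustrative sketch of the recovery performed below (the declarations are
// hypothetical, not taken from the original source):
//
//   int getValue();
//   int x = getValue;   // likely meant getValue()
//
// When the expression is callable with zero arguments and its result type is
// plausible, the diagnostic PD is emitted (with a fix-it inserting "()" when
// that is syntactically safe), and the expression is rebuilt as the
// corresponding call so analysis can continue as if the user had written
// getValue().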
2653 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
2654                                 bool ForceComplain,
2655                                 bool (*IsPlausibleResult)(QualType)) {
2656   SourceLocation Loc = E.get()->getExprLoc();
2657   SourceRange Range = E.get()->getSourceRange();
2658   UnresolvedSet<4> Overloads;
2659 
2660   // If this is a SFINAE context, don't try anything that might trigger ADL
2661   // prematurely.
2662   if (!isSFINAEContext()) {
2663     QualType ZeroArgCallTy;
2664     if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
2665         !ZeroArgCallTy.isNull() &&
2666         (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
2667       // At this point, we know E is potentially callable with 0
2668       // arguments and that it returns something of a reasonable type,
2669       // so we can emit a fixit and carry on pretending that E was
2670       // actually a CallExpr.
2671       SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
2672       bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2673       Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
2674                     << (IsCallableWithAppend(E.get())
2675                             ? FixItHint::CreateInsertion(ParenInsertionLoc,
2676                                                          "()")
2677                             : FixItHint());
2678       if (!IsMV)
2679         notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2680 
2681       // FIXME: Try this before emitting the fixit, and suppress diagnostics
2682       // while doing so.
2683       E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), std::nullopt,
2684                         Range.getEnd().getLocWithOffset(1));
2685       return true;
2686     }
2687   }
2688   if (!ForceComplain) return false;
2689 
2690   bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
2691   Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
2692   if (!IsMV)
2693     notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
2694   E = ExprError();
2695   return true;
2696 }
2697 
2698 IdentifierInfo *Sema::getSuperIdentifier() const {
2699   if (!Ident_super)
2700     Ident_super = &Context.Idents.get("super");
2701   return Ident_super;
2702 }
2703 
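// Push a new captured-region scope onto the function-scope stack (used for
// constructs such as OpenMP captured statements). The region's return type is
// void, and the OpenMP nesting level is recorded when the region kind is
// CR_OpenMP.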
2704 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2705                                    CapturedRegionKind K,
2706                                    unsigned OpenMPCaptureLevel) {
2707   auto *CSI = new CapturedRegionScopeInfo(
2708       getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2709       (getLangOpts().OpenMP && K == CR_OpenMP)
2710           ? OpenMP().getOpenMPNestingLevel()
2711           : 0,
2712       OpenMPCaptureLevel);
2713   CSI->ReturnType = Context.VoidTy;
2714   FunctionScopes.push_back(CSI);
2715   CapturingFunctionScopes++;
2716 }
2717 
2718 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2719   if (FunctionScopes.empty())
2720     return nullptr;
2721 
2722   return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
2723 }
2724 
2725 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
2726 Sema::getMismatchingDeleteExpressions() const {
2727   return DeleteExprs;
2728 }
2729 
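// RAII helper that snapshots the current floating-point feature state, the
// FP pragma override stack value, and the preprocessor's FP evaluation
// method, restoring all of them when the object goes out of scope.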
2730 Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
2731     : S(S), OldFPFeaturesState(S.CurFPFeatures),
2732       OldOverrides(S.FpPragmaStack.CurrentValue),
2733       OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
2734       OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2735 
2736 Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
2737   S.CurFPFeatures = OldFPFeaturesState;
2738   S.FpPragmaStack.CurrentValue = OldOverrides;
2739   S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
2740 }
2741 
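// Determine whether a qualified declarator name refers exclusively to
// function-like entities: every prior declaration found by qualified lookup
// must be a function, function template, or using declaration.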
2742 bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2743   assert(D.getCXXScopeSpec().isSet() &&
2744          "can only be called for qualified names");
2745 
2746   auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2747                          LookupOrdinaryName, forRedeclarationInCurContext());
2748   DeclContext *DC = computeDeclContext(D.getCXXScopeSpec(),
2749                                        !D.getDeclSpec().isFriendSpecified());
2750   if (!DC)
2751     return false;
2752 
2753   LookupQualifiedName(LR, DC);
2754   bool Result = llvm::all_of(LR, [](Decl *Dcl) {
2755     if (NamedDecl *ND = dyn_cast<NamedDecl>(Dcl)) {
2756       ND = ND->getUnderlyingDecl();
2757       return isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
2758              isa<UsingDecl>(ND);
2759     }
2760     return false;
2761   });
2762   return Result;
2763 }
2764 
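// Compute the differences between two function-effect lists in a single
// merge-style walk; the comparison logic assumes both lists are ordered by
// effect kind. Illustrative example: Old = {nonblocking} and
// New = {nonallocating} yields one Removed(nonblocking) entry and one
// Added(nonallocating) entry.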
2765 FunctionEffectDifferences::FunctionEffectDifferences(
2766     const FunctionEffectsRef &Old, const FunctionEffectsRef &New) {
2767 
2768   FunctionEffectsRef::iterator POld = Old.begin();
2769   FunctionEffectsRef::iterator OldEnd = Old.end();
2770   FunctionEffectsRef::iterator PNew = New.begin();
2771   FunctionEffectsRef::iterator NewEnd = New.end();
2772 
2773   while (true) {
2774     int cmp = 0;
2775     if (POld == OldEnd) {
2776       if (PNew == NewEnd)
2777         break;
2778       cmp = 1;
2779     } else if (PNew == NewEnd)
2780       cmp = -1;
2781     else {
2782       FunctionEffectWithCondition Old = *POld;
2783       FunctionEffectWithCondition New = *PNew;
2784       if (Old.Effect.kind() < New.Effect.kind())
2785         cmp = -1;
2786       else if (New.Effect.kind() < Old.Effect.kind())
2787         cmp = 1;
2788       else {
2789         cmp = 0;
2790         if (Old.Cond.getCondition() != New.Cond.getCondition()) {
2791           // FIXME: Handle cases where the expressions are equivalent but
2792           // don't have the same identity.
2793           push_back(FunctionEffectDiff{
2794               Old.Effect.kind(), FunctionEffectDiff::Kind::ConditionMismatch,
2795               Old, New});
2796         }
2797       }
2798     }
2799 
2800     if (cmp < 0) {
2801       // The effect is present only in Old: record a removal.
2802       FunctionEffectWithCondition Old = *POld;
2803       push_back(FunctionEffectDiff{
2804           Old.Effect.kind(), FunctionEffectDiff::Kind::Removed, Old, {}});
2805       ++POld;
2806     } else if (cmp > 0) {
2807       // The effect is present only in New: record an addition.
2808       FunctionEffectWithCondition New = *PNew;
2809       push_back(FunctionEffectDiff{
2810           New.Effect.kind(), FunctionEffectDiff::Kind::Added, {}, New});
2811       ++PNew;
2812     } else {
2813       ++POld;
2814       ++PNew;
2815     }
2816   }
2817 }
2818 
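// Illustrative example (hypothetical declarations, not from the original
// source): converting the address of a plain 'void g();' to a pointer to a
// 'void () [[clang::nonblocking]]' function type adds the nonblocking effect,
// which the predicate below flags for diagnosis; a conversion that drops the
// effect is not diagnosed.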
2819 bool FunctionEffectDiff::shouldDiagnoseConversion(
2820     QualType SrcType, const FunctionEffectsRef &SrcFX, QualType DstType,
2821     const FunctionEffectsRef &DstFX) const {
2822 
2823   switch (EffectKind) {
2824   case FunctionEffect::Kind::NonAllocating:
2825     // nonallocating can't be added (spoofed) during a conversion, unless
2826     // the source already has nonblocking, which subsumes nonallocating.
2827     if (DiffKind == Kind::Added) {
2828       for (const auto &CFE : SrcFX) {
2829         if (CFE.Effect.kind() == FunctionEffect::Kind::NonBlocking)
2830           return false;
2831       }
2832     }
2833     [[fallthrough]];
2834   case FunctionEffect::Kind::NonBlocking:
2835     // nonblocking can't be added (spoofed) during a conversion.
2836     switch (DiffKind) {
2837     case Kind::Added:
2838       return true;
2839     case Kind::Removed:
2840       return false;
2841     case Kind::ConditionMismatch:
2842       // FIXME: Condition mismatches are too coarse right now -- expressions
2843       // which are equivalent but don't have the same identity are detected as
2844       // mismatches. We're going to diagnose those anyhow until expression
2845       // matching is better.
2846       return true;
2847     }
2848   case FunctionEffect::Kind::Blocking:
2849   case FunctionEffect::Kind::Allocating:
2850     return false;
2851   case FunctionEffect::Kind::None:
2852     break;
2853   }
2854   llvm_unreachable("unknown effect kind");
2855 }
2856 
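// Illustrative example (hypothetical declaration, not from the original
// source): redeclaring 'void f() [[clang::nonblocking]];' as plain
// 'void f();' removes the effect, which the predicate below flags for
// diagnosis; merely adding the effect on a redeclaration is not diagnosed.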
2857 bool FunctionEffectDiff::shouldDiagnoseRedeclaration(
2858     const FunctionDecl &OldFunction, const FunctionEffectsRef &OldFX,
2859     const FunctionDecl &NewFunction, const FunctionEffectsRef &NewFX) const {
2860   switch (EffectKind) {
2861   case FunctionEffect::Kind::NonAllocating:
2862   case FunctionEffect::Kind::NonBlocking:
2863     // nonblocking/nonallocating can't be removed in a redeclaration.
2864     switch (DiffKind) {
2865     case Kind::Added:
2866       return false; // No diagnostic.
2867     case Kind::Removed:
2868       return true; // Issue diagnostic.
2869     case Kind::ConditionMismatch:
2870       // All these forms of mismatches are diagnosed.
2871       return true;
2872     }
2873   case FunctionEffect::Kind::Blocking:
2874   case FunctionEffect::Kind::Allocating:
2875     return false;
2876   case FunctionEffect::Kind::None:
2877     break;
2878   }
2879   llvm_unreachable("unknown effect kind");
2880 }
2881 
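// Illustrative example (hypothetical classes, not from the original source):
// if a virtual method is declared nonblocking in a base class and an override
// omits the effect, the Removed case below requests that the effect be merged
// onto the override rather than emitting a warning; a condition mismatch
// between the two declarations produces a warning instead.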
2882 FunctionEffectDiff::OverrideResult
2883 FunctionEffectDiff::shouldDiagnoseMethodOverride(
2884     const CXXMethodDecl &OldMethod, const FunctionEffectsRef &OldFX,
2885     const CXXMethodDecl &NewMethod, const FunctionEffectsRef &NewFX) const {
2886   switch (EffectKind) {
2887   case FunctionEffect::Kind::NonAllocating:
2888   case FunctionEffect::Kind::NonBlocking:
2889     switch (DiffKind) {
2890 
2891     // If added on an override, that's fine and not diagnosed.
2892     case Kind::Added:
2893       return OverrideResult::NoAction;
2894 
2895     // If missing from an override (removed), propagate from base to derived.
2896     case Kind::Removed:
2897       return OverrideResult::Merge;
2898 
2899     // If there's a mismatch involving the effect's polarity or condition,
2900     // issue a warning.
2901     case Kind::ConditionMismatch:
2902       return OverrideResult::Warn;
2903     }
2904 
2905   case FunctionEffect::Kind::Blocking:
2906   case FunctionEffect::Kind::Allocating:
2907     return OverrideResult::NoAction;
2908 
2909   case FunctionEffect::Kind::None:
2910     break;
2911   }
2912   llvm_unreachable("unknown effect kind");
2913 }
2914