xref: /freebsd/contrib/llvm-project/clang/lib/Sema/Sema.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the actions class which performs semantic analysis and
10 // builds an AST out of a parse stream.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "UsedDeclVisitor.h"
15 #include "clang/AST/ASTContext.h"
16 #include "clang/AST/ASTDiagnostic.h"
17 #include "clang/AST/Decl.h"
18 #include "clang/AST/DeclCXX.h"
19 #include "clang/AST/DeclFriend.h"
20 #include "clang/AST/DeclObjC.h"
21 #include "clang/AST/Expr.h"
22 #include "clang/AST/ExprCXX.h"
23 #include "clang/AST/PrettyDeclStackTrace.h"
24 #include "clang/AST/StmtCXX.h"
25 #include "clang/AST/TypeOrdering.h"
26 #include "clang/Basic/DarwinSDKInfo.h"
27 #include "clang/Basic/DiagnosticOptions.h"
28 #include "clang/Basic/PartialDiagnostic.h"
29 #include "clang/Basic/SourceManager.h"
30 #include "clang/Basic/TargetInfo.h"
31 #include "clang/Lex/HeaderSearch.h"
32 #include "clang/Lex/HeaderSearchOptions.h"
33 #include "clang/Lex/Preprocessor.h"
34 #include "clang/Sema/CXXFieldCollector.h"
35 #include "clang/Sema/EnterExpressionEvaluationContext.h"
36 #include "clang/Sema/ExternalSemaSource.h"
37 #include "clang/Sema/Initialization.h"
38 #include "clang/Sema/MultiplexExternalSemaSource.h"
39 #include "clang/Sema/ObjCMethodList.h"
40 #include "clang/Sema/RISCVIntrinsicManager.h"
41 #include "clang/Sema/Scope.h"
42 #include "clang/Sema/ScopeInfo.h"
43 #include "clang/Sema/SemaAMDGPU.h"
44 #include "clang/Sema/SemaARM.h"
45 #include "clang/Sema/SemaAVR.h"
46 #include "clang/Sema/SemaBPF.h"
47 #include "clang/Sema/SemaCUDA.h"
48 #include "clang/Sema/SemaCodeCompletion.h"
49 #include "clang/Sema/SemaConsumer.h"
50 #include "clang/Sema/SemaDirectX.h"
51 #include "clang/Sema/SemaHLSL.h"
52 #include "clang/Sema/SemaHexagon.h"
53 #include "clang/Sema/SemaLoongArch.h"
54 #include "clang/Sema/SemaM68k.h"
55 #include "clang/Sema/SemaMIPS.h"
56 #include "clang/Sema/SemaMSP430.h"
57 #include "clang/Sema/SemaNVPTX.h"
58 #include "clang/Sema/SemaObjC.h"
59 #include "clang/Sema/SemaOpenACC.h"
60 #include "clang/Sema/SemaOpenCL.h"
61 #include "clang/Sema/SemaOpenMP.h"
62 #include "clang/Sema/SemaPPC.h"
63 #include "clang/Sema/SemaPseudoObject.h"
64 #include "clang/Sema/SemaRISCV.h"
65 #include "clang/Sema/SemaSPIRV.h"
66 #include "clang/Sema/SemaSYCL.h"
67 #include "clang/Sema/SemaSwift.h"
68 #include "clang/Sema/SemaSystemZ.h"
69 #include "clang/Sema/SemaWasm.h"
70 #include "clang/Sema/SemaX86.h"
71 #include "clang/Sema/TemplateDeduction.h"
72 #include "clang/Sema/TemplateInstCallback.h"
73 #include "clang/Sema/TypoCorrection.h"
74 #include "llvm/ADT/DenseMap.h"
75 #include "llvm/ADT/STLExtras.h"
76 #include "llvm/ADT/SmallPtrSet.h"
77 #include "llvm/Support/TimeProfiler.h"
78 #include <optional>
79 
80 using namespace clang;
81 using namespace sema;
82 
// Convenience wrapper around Lexer::getLocForEndOfToken that supplies this
// Sema's SourceManager and LangOpts. Returns the location just past the end
// of the token at Loc, offset by Offset.
SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
  return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
}
86 
87 SourceRange
getRangeForNextToken(SourceLocation Loc,bool IncludeMacros,bool IncludeComments,std::optional<tok::TokenKind> ExpectedToken)88 Sema::getRangeForNextToken(SourceLocation Loc, bool IncludeMacros,
89                            bool IncludeComments,
90                            std::optional<tok::TokenKind> ExpectedToken) {
91   if (!Loc.isValid())
92     return SourceRange();
93   std::optional<Token> NextToken =
94       Lexer::findNextToken(Loc, SourceMgr, LangOpts, IncludeComments);
95   if (!NextToken)
96     return SourceRange();
97   if (ExpectedToken && NextToken->getKind() != *ExpectedToken)
98     return SourceRange();
99   SourceLocation TokenStart = NextToken->getLocation();
100   SourceLocation TokenEnd = NextToken->getLastLoc();
101   if (!TokenStart.isValid() || !TokenEnd.isValid())
102     return SourceRange();
103   if (!IncludeMacros && (TokenStart.isMacroID() || TokenEnd.isMacroID()))
104     return SourceRange();
105 
106   return SourceRange(TokenStart, TokenEnd);
107 }
108 
// Sema does not own a module loader; forward to the preprocessor's.
ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
110 
111 DarwinSDKInfo *
getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,StringRef Platform)112 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
113                                               StringRef Platform) {
114   auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
115   if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
116     Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
117         << Platform;
118     WarnedDarwinSDKInfoMissing = true;
119   }
120   return SDKInfo;
121 }
122 
getDarwinSDKInfoForAvailabilityChecking()123 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
124   if (CachedDarwinSDKInfo)
125     return CachedDarwinSDKInfo->get();
126   auto SDKInfo = parseDarwinSDKInfo(
127       PP.getFileManager().getVirtualFileSystem(),
128       PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
129   if (SDKInfo && *SDKInfo) {
130     CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
131     return CachedDarwinSDKInfo->get();
132   }
133   if (!SDKInfo)
134     llvm::consumeError(SDKInfo.takeError());
135   CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
136   return nullptr;
137 }
138 
// Invent a name for the template type parameter synthesized for an
// abbreviated function template ('auto' parameter). Unnamed parameters get
// "auto:<1-based index>"; named parameters get "<name>:auto".
//
// \param ParamName the spelled parameter name, or null if unnamed.
// \param Index the zero-based index of the parameter.
// \returns the interned identifier for the invented name.
IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName(
    const IdentifierInfo *ParamName, unsigned int Index) {
  std::string InventedName;
  llvm::raw_string_ostream OS(InventedName);

  if (!ParamName)
    OS << "auto:" << Index + 1;
  else
    OS << ParamName->getName() << ":auto";

  // raw_string_ostream writes through to InventedName directly (it is
  // unbuffered); OS.str() is deprecated/removed in current LLVM, so read the
  // backing string instead.
  return &Context.Idents.get(InventedName);
}
151 
getPrintingPolicy(const ASTContext & Context,const Preprocessor & PP)152 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
153                                        const Preprocessor &PP) {
154   PrintingPolicy Policy = Context.getPrintingPolicy();
155   // In diagnostics, we print _Bool as bool if the latter is defined as the
156   // former.
157   Policy.Bool = Context.getLangOpts().Bool;
158   if (!Policy.Bool) {
159     if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
160       Policy.Bool = BoolMacro->isObjectLike() &&
161                     BoolMacro->getNumTokens() == 1 &&
162                     BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
163     }
164   }
165 
166   // Shorten the data output if needed
167   Policy.EntireContentsOfLargeArray = false;
168 
169   return Policy;
170 }
171 
// Record the translation-unit scope and make the TranslationUnitDecl the
// current declaration context, so subsequent declarations land in the TU.
void Sema::ActOnTranslationUnitScope(Scope *S) {
  TUScope = S;
  PushDeclContext(S, Context.getTranslationUnitDecl());
}
176 
namespace clang {
namespace sema {

// Preprocessor callbacks owned by the Preprocessor but driven by Sema:
// tracks file enter/exit (for pragma align/pack diagnostics and -ftime-trace
// bookkeeping) and pragma-diagnostic changes (to re-enable analysis-based
// warnings for the current function).
class SemaPPCallbacks : public PPCallbacks {
  // Back-pointer to the Sema; null before set() and after reset(), since the
  // callbacks outlive the Sema (the preprocessor owns them).
  Sema *S = nullptr;
  // #include locations of files we have entered but not yet exited.
  llvm::SmallVector<SourceLocation, 8> IncludeStack;
  // Parallel stack of async time-trace entries opened on file entry.
  llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;

public:
  void set(Sema &S) { this->S = &S; }

  void reset() { S = nullptr; }

  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                   SrcMgr::CharacteristicKind FileType,
                   FileID PrevFID) override {
    // Sema may already have been destroyed (see reset()); do nothing then.
    if (!S)
      return;
    switch (Reason) {
    case EnterFile: {
      SourceManager &SM = S->getSourceManager();
      SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
      if (IncludeLoc.isValid()) {
        if (llvm::timeTraceProfilerEnabled()) {
          OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
          ProfilerStack.push_back(llvm::timeTraceAsyncProfilerBegin(
              "Source", FE ? FE->getName() : StringRef("<unknown>")));
        }

        IncludeStack.push_back(IncludeLoc);
        // Diagnose entering an include while #pragma align/pack is in a
        // non-default state.
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
            IncludeLoc);
      }
      break;
    }
    case ExitFile:
      // Only pop if the matching EnterFile pushed (i.e. it had a valid
      // include location).
      if (!IncludeStack.empty()) {
        if (llvm::timeTraceProfilerEnabled())
          llvm::timeTraceProfilerEnd(ProfilerStack.pop_back_val());

        // Diagnose leaving an include that changed align/pack state without
        // restoring it.
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
            IncludeStack.pop_back_val());
      }
      break;
    default:
      break;
    }
  }
  void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
                        diag::Severity Mapping, StringRef Str) override {
    // If one of the analysis-based diagnostics was enabled while processing
    // a function, we want to note it in the analysis-based warnings so they
    // can be run at the end of the function body even if the analysis warnings
    // are disabled at that point.
    SmallVector<diag::kind, 256> GroupDiags;
    // Str is the pragma's option spelling, e.g. "-Wfoo" or "-Rfoo"; the
    // second character selects warning vs. remark and the rest names the
    // group.
    diag::Flavor Flavor =
        Str[1] == 'W' ? diag::Flavor::WarningOrError : diag::Flavor::Remark;
    StringRef Group = Str.substr(2);

    // getDiagnosticsInGroup returns true on failure (unknown group).
    if (S->PP.getDiagnostics().getDiagnosticIDs()->getDiagnosticsInGroup(
            Flavor, Group, GroupDiags))
      return;

    for (diag::kind K : GroupDiags) {
      // Note: the cases in this switch should be kept in sync with the
      // diagnostics in AnalysisBasedWarnings::getPolicyInEffectAt().
      AnalysisBasedWarnings::Policy &Override =
          S->AnalysisWarnings.getPolicyOverrides();
      switch (K) {
      default: break;
      case diag::warn_unreachable:
      case diag::warn_unreachable_break:
      case diag::warn_unreachable_return:
      case diag::warn_unreachable_loop_increment:
        Override.enableCheckUnreachable = true;
        break;
      case diag::warn_double_lock:
        Override.enableThreadSafetyAnalysis = true;
        break;
      case diag::warn_use_in_invalid_state:
        Override.enableConsumedAnalysis = true;
        break;
      }
    }
  }
};

} // end namespace sema
} // end namespace clang
268 
// Out-of-line definitions for Sema's static constant data members so they
// can be ODR-used (e.g. bound to references) — required pre-C++17 inline
// variables.
const unsigned Sema::MaxAlignmentExponent;
const uint64_t Sema::MaximumAlignment;
271 
// Construct a Sema.
//  - pp:           preprocessor; supplies LangOpts, SourceManager, Diags.
//  - ctxt:         ASTContext the semantic analysis builds into.
//  - consumer:     ASTConsumer that receives the produced AST.
//  - TUKind:       kind of translation unit being parsed.
//  - CodeCompleter: optional code-completion consumer (may be null).
// The member-init list order follows the declaration order in Sema.h; the
// per-target/per-language Sema* helper objects are all eagerly allocated.
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : SemaBase(*this), CollectStats(false), TUKind(TUKind),
      CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
      Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
      SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts),
      AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
      LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr),
      OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr),
      StackHandler(Diags), CurScope(nullptr), Ident_super(nullptr),
      // Language-/target-specific Sema sub-objects.
      AMDGPUPtr(std::make_unique<SemaAMDGPU>(*this)),
      ARMPtr(std::make_unique<SemaARM>(*this)),
      AVRPtr(std::make_unique<SemaAVR>(*this)),
      BPFPtr(std::make_unique<SemaBPF>(*this)),
      CodeCompletionPtr(
          std::make_unique<SemaCodeCompletion>(*this, CodeCompleter)),
      CUDAPtr(std::make_unique<SemaCUDA>(*this)),
      DirectXPtr(std::make_unique<SemaDirectX>(*this)),
      HLSLPtr(std::make_unique<SemaHLSL>(*this)),
      HexagonPtr(std::make_unique<SemaHexagon>(*this)),
      LoongArchPtr(std::make_unique<SemaLoongArch>(*this)),
      M68kPtr(std::make_unique<SemaM68k>(*this)),
      MIPSPtr(std::make_unique<SemaMIPS>(*this)),
      MSP430Ptr(std::make_unique<SemaMSP430>(*this)),
      NVPTXPtr(std::make_unique<SemaNVPTX>(*this)),
      ObjCPtr(std::make_unique<SemaObjC>(*this)),
      OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
      OpenCLPtr(std::make_unique<SemaOpenCL>(*this)),
      OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
      PPCPtr(std::make_unique<SemaPPC>(*this)),
      PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)),
      RISCVPtr(std::make_unique<SemaRISCV>(*this)),
      SPIRVPtr(std::make_unique<SemaSPIRV>(*this)),
      SYCLPtr(std::make_unique<SemaSYCL>(*this)),
      SwiftPtr(std::make_unique<SemaSwift>(*this)),
      SystemZPtr(std::make_unique<SemaSystemZ>(*this)),
      WasmPtr(std::make_unique<SemaWasm>(*this)),
      X86Ptr(std::make_unique<SemaX86>(*this)),
      // Pragma/stack state, seeded from the language options.
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
      FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
      VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
      StdCoroutineTraitsCache(nullptr), IdResolver(pp),
      OriginalLexicalContext(nullptr), StdInitializerList(nullptr),
      StdTypeIdentity(nullptr),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      StdSourceLocationImplDecl(nullptr), CXXTypeInfoDecl(nullptr),
      GlobalNewDeleteDeclared(false), DisableTypoCorrection(false),
      TyposCorrected(0), IsBuildingRecoveryCallExpr(false), NumSFINAEErrors(0),
      AccessCheckingSFINAE(false), CurrentInstantiationScope(nullptr),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgPackSubstIndex(std::nullopt), SatisfactionCache(Context) {
  // The preprocessor must have been configured for the same TU kind.
  assert(pp.TUKind == TUKind);
  TUScope = nullptr;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    ObjC().NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    ObjC().NSAPIObj.reset(new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
      nullptr, ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  OpenMP().InitDataSharingAttributesStack();

  // Register our PP callbacks; the preprocessor takes ownership, so keep a
  // raw handle to detach from it in ~Sema.
  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);

  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}
363 
// Anchor Sema's type info to this TU (out-of-line virtual-ish anchor so the
// vtable/RTTI are emitted exactly once, in this object file).
void Sema::anchor() {}
366 
addImplicitTypedef(StringRef Name,QualType T)367 void Sema::addImplicitTypedef(StringRef Name, QualType T) {
368   DeclarationName DN = &Context.Idents.get(Name);
369   if (IdResolver.begin(DN) == IdResolver.end())
370     PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
371 }
372 
// Perform per-translation-unit initialization: wire up consumers/external
// sources, then seed the TU scope with the predefined/builtin declarations
// required by the selected languages and targets.
void Sema::Initialize() {
  // Create BuiltinVaListDecl *before* ExternalSemaSource::InitializeSema(this)
  // because during initialization ASTReader can emit globals that require
  // name mangling. And the name mangling uses BuiltinVaListDecl.
  if (Context.getTargetInfo().hasBuiltinMSVaList())
    (void)Context.getBuiltinMSVaListDecl();
  (void)Context.getBuiltinVaListDecl();

  // Let a SemaConsumer (if that's what the consumer is) see this Sema.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->InitializeSema(*this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->InitializeSema(*this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  // Everything below populates the TU scope; skip it when there is none.
  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }


  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(
          Context.buildImplicitRecord("type_info", TagTypeKind::Class),
          TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
        addImplicitTypedef("queue_t", Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));


      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.

      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(Context.getPointerDiffType());
        addImplicitTypedef("atomic_size_t", AtomicSizeT);
        addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
        addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
        addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
      };

      if (Context.getTypeSize(Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
        addImplicitTypedef("atomic_half", AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
                                         getLangOpts()) &&
          getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
                                         getLangOpts())) {
        if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
          addImplicitTypedef("atomic_double", AtomicDoubleT);
          Atomic64BitTypes.push_back(AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
        addImplicitTypedef("atomic_long", AtomicLongT);
        addImplicitTypedef("atomic_ulong", AtomicULongT);


        // 64-bit pointer-sized atomics require the 64-bit atomics extensions
        // checked above.
        if (Context.getTypeSize(Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

    // Per-extension opaque OpenCL types (image types etc.), driven by the
    // .def file; each is added only when its extension is supported.
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) {                   \
    addImplicitTypedef(#ExtType, Context.Id##Ty);                              \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  // AArch64 ACLE types (SVE vectors, NEON vector typedefs).
  if (Context.getTargetInfo().hasAArch64ACLETypes() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasAArch64ACLETypes())) {
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(#Name, Context.SingletonId);
#define NEON_VECTOR_TYPE(Name, BaseType, ElBits, NumEls, VectorKind)           \
  addImplicitTypedef(                                                          \
      #Name, Context.getVectorType(Context.BaseType, NumEls, VectorKind));
#include "clang/Basic/AArch64ACLETypes.def"
  }

  // PowerPC MMA/VSX builtin types.
  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
      addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  // RISC-V vector (RVV) builtin types.
  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  // WebAssembly reference types (require the reference-types feature).
  if (Context.getTargetInfo().getTriple().isWasm() &&
      Context.getTargetInfo().hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId)                                       \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // AMDGPU builtin types.
  if (Context.getTargetInfo().getTriple().isAMDGPU() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->getTriple().isAMDGPU())) {
#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align)                       \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AMDGPUTypes.def"
  }

  // Make the builtin va_list declarations visible to name lookup (they were
  // created eagerly at the top of this function).
  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
    if (IdResolver.begin(MSVaList) == IdResolver.end())
      PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
  if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}
591 
// Tear down the Sema. The order below matters: consumers and external
// sources are told to forget us before dependent state is destroyed, and the
// PP callback handler (owned by the longer-lived Preprocessor) is detached
// last so it cannot call back into a destroyed Sema.
Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // Delete cached satisfactions. Collect the node pointers first so we are
  // not deleting entries out from under the iteration over the cache.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  OpenMP().DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}
628 
// Run Fn, delegating stack-space management/diagnostics for deep recursion
// at Loc to the shared StackHandler.
void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  StackHandler.runWithSufficientStackSpace(Loc, Fn);
}
633 
makeUnavailableInSystemHeader(SourceLocation loc,UnavailableAttr::ImplicitReason reason)634 bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
635                                       UnavailableAttr::ImplicitReason reason) {
636   // If we're not in a function, it's an error.
637   FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
638   if (!fn) return false;
639 
640   // If we're in template instantiation, it's an error.
641   if (inTemplateInstantiation())
642     return false;
643 
644   // If that function's not in a system header, it's an error.
645   if (!Context.getSourceManager().isInSystemHeader(loc))
646     return false;
647 
648   // If the function is already unavailable, it's not an error.
649   if (fn->hasAttr<UnavailableAttr>()) return true;
650 
651   fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
652   return true;
653 }
654 
// Forward to the AST consumer's mutation listener.
ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}
658 
addExternalSource(ExternalSemaSource * E)659 void Sema::addExternalSource(ExternalSemaSource *E) {
660   assert(E && "Cannot use with NULL ptr");
661 
662   if (!ExternalSource) {
663     ExternalSource = E;
664     return;
665   }
666 
667   if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource))
668     Ex->AddSource(E);
669   else
670     ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
671 }
672 
PrintStats() const673 void Sema::PrintStats() const {
674   llvm::errs() << "\n*** Semantic Analysis Stats:\n";
675   llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
676 
677   BumpAlloc.PrintStats();
678   AnalysisWarnings.PrintStats();
679 }
680 
diagnoseNullableToNonnullConversion(QualType DstType,QualType SrcType,SourceLocation Loc)681 void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
682                                                QualType SrcType,
683                                                SourceLocation Loc) {
684   std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
685   if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
686                            *ExprNullability != NullabilityKind::NullableResult))
687     return;
688 
689   std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
690   if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
691     return;
692 
693   Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
694 }
695 
696 // Generate diagnostics when adding or removing effects in a type conversion.
diagnoseFunctionEffectConversion(QualType DstType,QualType SrcType,SourceLocation Loc)697 void Sema::diagnoseFunctionEffectConversion(QualType DstType, QualType SrcType,
698                                             SourceLocation Loc) {
699   const auto SrcFX = FunctionEffectsRef::get(SrcType);
700   const auto DstFX = FunctionEffectsRef::get(DstType);
701   if (SrcFX != DstFX) {
702     for (const auto &Diff : FunctionEffectDiffVector(SrcFX, DstFX)) {
703       if (Diff.shouldDiagnoseConversion(SrcType, SrcFX, DstType, DstFX))
704         Diag(Loc, diag::warn_invalid_add_func_effects) << Diff.effectName();
705     }
706   }
707 }
708 
diagnoseZeroToNullptrConversion(CastKind Kind,const Expr * E)709 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
710   // nullptr only exists from C++11 on, so don't warn on its absence earlier.
711   if (!getLangOpts().CPlusPlus11)
712     return;
713 
714   if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
715     return;
716 
717   const Expr *EStripped = E->IgnoreParenImpCasts();
718   if (EStripped->getType()->isNullPtrType())
719     return;
720   if (isa<GNUNullExpr>(EStripped))
721     return;
722 
723   if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
724                       E->getBeginLoc()))
725     return;
726 
727   // Don't diagnose the conversion from a 0 literal to a null pointer argument
728   // in a synthesized call to operator<=>.
729   if (!CodeSynthesisContexts.empty() &&
730       CodeSynthesisContexts.back().Kind ==
731           CodeSynthesisContext::RewritingOperatorAsSpaceship)
732     return;
733 
734   // Ignore null pointers in defaulted comparison operators.
735   FunctionDecl *FD = getCurFunctionDecl();
736   if (FD && FD->isDefaulted()) {
737     return;
738   }
739 
740   // If it is a macro from system header, and if the macro name is not "NULL",
741   // do not warn.
742   // Note that uses of "NULL" will be ignored above on systems that define it
743   // as __null.
744   SourceLocation MaybeMacroLoc = E->getBeginLoc();
745   if (Diags.getSuppressSystemWarnings() &&
746       SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
747       !findMacroSpelling(MaybeMacroLoc, "NULL"))
748     return;
749 
750   Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
751       << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
752 }
753 
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
///
/// \param E the expression to cast.
/// \param Ty the destination type of the cast.
/// \param Kind the cast kind to use.
/// \param VK the value category of the resulting expression.
/// \param BasePath derived-to-base path for base-class casts, if any.
/// \param CCK distinguishes implicit conversions from explicit casts
///        (used below to gate the function-effect diagnostic).
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  // Asserts-only sanity check: only a known set of cast kinds may turn a
  // glvalue operand into a prvalue result; anything else is a caller bug.
  if (VK == VK_PRValue && !E->isPRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(
          ("can't implicitly cast glvalue to prvalue with this cast "
           "kind: " +
           std::string(CastExpr::getCastKindName(Kind)))
              .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
    case CK_HLSLArrayRValue:
    case CK_HLSLAggregateSplatCast:
      break;
    }
  }
  // The reverse direction (prvalue -> glvalue) is never legitimate, except
  // for dependent casts whose category is not yet known.
  assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
         "can't cast prvalue to glvalue");
#endif

  // Conversion diagnostics fire regardless of whether a new cast node ends
  // up being created below.
  diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);
  if (Context.hasAnyFunctionEffects() && !isCast(CCK) &&
      Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    diagnoseFunctionEffectConversion(Ty, E->getType(), E->getBeginLoc());

  QualType ExprTy = Context.getCanonicalType(E->getType());
  QualType TypeTy = Context.getCanonicalType(Ty);

  // This cast is used in place of a regular LValue to RValue cast for
  // HLSL Array Parameter Types. It needs to be emitted even if
  // ExprTy == TypeTy, except if E is an HLSLOutArgExpr
  // Emitting a cast in that case will prevent HLSLOutArgExpr from
  // being handled properly in EmitCallArg
  if (Kind == CK_HLSLArrayRValue && !isa<HLSLOutArgExpr>(E))
    return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
                                    CurFPFeatureOverrides());

  // No-op conversion: return the operand unchanged.
  if (ExprTy == TypeTy)
    return E;

  if (Kind == CK_ArrayToPointerDecay) {
    // C++1z [conv.array]: The temporary materialization conversion is applied.
    // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
    if (getLangOpts().CPlusPlus && E->isPRValue()) {
      // The temporary is an lvalue in C++98 and an xvalue otherwise.
      ExprResult Materialized = CreateMaterializeTemporaryExpr(
          E->getType(), E, !getLangOpts().CPlusPlus11);
      if (Materialized.isInvalid())
        return ExprError();
      E = Materialized.get();
    }
    // C17 6.7.1p6 footnote 124: The implementation can treat any register
    // declaration simply as an auto declaration. However, whether or not
    // addressable storage is actually used, the address of any part of an
    // object declared with storage-class specifier register cannot be
    // computed, either explicitly(by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly(by converting an array name to a pointer as
    // discussed in 6.3.2.1).Thus, the only operator that can be applied to an
    // array declared with storage-class specifier register is sizeof.
    if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
        if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
          if (VD->getStorageClass() == SC_Register) {
            Diag(E->getExprLoc(), diag::err_typecheck_address_of)
                << /*register variable*/ 3 << E->getSourceRange();
            return ExprError();
          }
        }
      }
    }
  }

  // If the operand is already an implicit cast of the same kind, retarget it
  // in place instead of wrapping it in a second cast node.
  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
                                  CurFPFeatureOverrides());
}
849 
ScalarTypeToBooleanCastKind(QualType ScalarTy)850 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
851   switch (ScalarTy->getScalarTypeKind()) {
852   case Type::STK_Bool: return CK_NoOp;
853   case Type::STK_CPointer: return CK_PointerToBoolean;
854   case Type::STK_BlockPointer: return CK_PointerToBoolean;
855   case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
856   case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
857   case Type::STK_Integral: return CK_IntegralToBoolean;
858   case Type::STK_Floating: return CK_FloatingToBoolean;
859   case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
860   case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
861   case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
862   }
863   llvm_unreachable("unknown scalar type kind");
864 }
865 
866 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
ShouldRemoveFromUnused(Sema * SemaRef,const DeclaratorDecl * D)867 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
868   if (D->getMostRecentDecl()->isUsed())
869     return true;
870 
871   if (D->isExternallyVisible())
872     return true;
873 
874   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
875     // If this is a function template and none of its specializations is used,
876     // we should warn.
877     if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
878       for (const auto *Spec : Template->specializations())
879         if (ShouldRemoveFromUnused(SemaRef, Spec))
880           return true;
881 
882     // UnusedFileScopedDecls stores the first declaration.
883     // The declaration may have become definition so check again.
884     const FunctionDecl *DeclToCheck;
885     if (FD->hasBody(DeclToCheck))
886       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
887 
888     // Later redecls may add new information resulting in not having to warn,
889     // so check again.
890     DeclToCheck = FD->getMostRecentDecl();
891     if (DeclToCheck != FD)
892       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
893   }
894 
895   if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
896     // If a variable usable in constant expressions is referenced,
897     // don't warn if it isn't used: if the value of a variable is required
898     // for the computation of a constant expression, it doesn't make sense to
899     // warn even if the variable isn't odr-used.  (isReferenced doesn't
900     // precisely reflect that, but it's a decent approximation.)
901     if (VD->isReferenced() &&
902         VD->mightBeUsableInConstantExpressions(SemaRef->Context))
903       return true;
904 
905     if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
906       // If this is a variable template and none of its specializations is used,
907       // we should warn.
908       for (const auto *Spec : Template->specializations())
909         if (ShouldRemoveFromUnused(SemaRef, Spec))
910           return true;
911 
912     // UnusedFileScopedDecls stores the first declaration.
913     // The declaration may have become definition so check again.
914     const VarDecl *DeclToCheck = VD->getDefinition();
915     if (DeclToCheck)
916       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
917 
918     // Later redecls may add new information resulting in not having to warn,
919     // so check again.
920     DeclToCheck = VD->getMostRecentDecl();
921     if (DeclToCheck != VD)
922       return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
923   }
924 
925   return false;
926 }
927 
isFunctionOrVarDeclExternC(const NamedDecl * ND)928 static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
929   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
930     return FD->isExternC();
931   return cast<VarDecl>(ND)->isExternC();
932 }
933 
934 /// Determine whether ND is an external-linkage function or variable whose
935 /// type has no linkage.
isExternalWithNoLinkageType(const ValueDecl * VD) const936 bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
937   // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
938   // because we also want to catch the case where its type has VisibleNoLinkage,
939   // which does not affect the linkage of VD.
940   return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
941          !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
942          !isFunctionOrVarDeclExternC(VD);
943 }
944 
945 /// Obtains a sorted list of functions and variables that are undefined but
946 /// ODR-used.
getUndefinedButUsed(SmallVectorImpl<std::pair<NamedDecl *,SourceLocation>> & Undefined)947 void Sema::getUndefinedButUsed(
948     SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
949   for (const auto &UndefinedUse : UndefinedButUsed) {
950     NamedDecl *ND = UndefinedUse.first;
951 
952     // Ignore attributes that have become invalid.
953     if (ND->isInvalidDecl()) continue;
954 
955     // __attribute__((weakref)) is basically a definition.
956     if (ND->hasAttr<WeakRefAttr>()) continue;
957 
958     if (isa<CXXDeductionGuideDecl>(ND))
959       continue;
960 
961     if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
962       // An exported function will always be emitted when defined, so even if
963       // the function is inline, it doesn't have to be emitted in this TU. An
964       // imported function implies that it has been exported somewhere else.
965       continue;
966     }
967 
968     if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
969       if (FD->isDefined())
970         continue;
971       if (FD->isExternallyVisible() &&
972           !isExternalWithNoLinkageType(FD) &&
973           !FD->getMostRecentDecl()->isInlined() &&
974           !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
975         continue;
976       if (FD->getBuiltinID())
977         continue;
978     } else {
979       const auto *VD = cast<VarDecl>(ND);
980       if (VD->hasDefinition() != VarDecl::DeclarationOnly)
981         continue;
982       if (VD->isExternallyVisible() &&
983           !isExternalWithNoLinkageType(VD) &&
984           !VD->getMostRecentDecl()->isInline() &&
985           !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
986         continue;
987 
988       // Skip VarDecls that lack formal definitions but which we know are in
989       // fact defined somewhere.
990       if (VD->isKnownToBeDefined())
991         continue;
992     }
993 
994     Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
995   }
996 }
997 
/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  S.UndefinedButUsed.clear();
  if (Undefined.empty()) return;

  for (const auto &Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
                                    ? diag::ext_undefined_internal_type
                                    : diag::err_undefined_internal_type)
        << isa<VarDecl>(VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      //
      // Under OpenMP, a 'declare variant' attribute may reference an
      // implicit, compiler-synthesized base function that is never defined;
      // detect that case so we can suppress the warning for it below.
      bool IsImplicitBase = false;
      if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
        auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
        if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
                          llvm::omp::TraitProperty::
                              implementation_extension_disable_implicit_base)) {
          const auto *Func = cast<FunctionDecl>(
              cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
          IsImplicitBase = BaseD->isImplicit() &&
                           Func->getIdentifier()->isMangledOpenMPVariantName();
        }
      }
      if (!S.getLangOpts().OpenMP || !IsImplicitBase)
        S.Diag(VD->getLocation(), diag::warn_undefined_internal)
            << isa<VarDecl>(VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
      (void)FD; // FD is only read inside the assert below.
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
    } else {
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
    }
    // Point at a recorded use of the undefined entity, when we have one.
    if (UseLoc.isValid())
      S.Diag(UseLoc, diag::note_used_here);
  }
}
1061 
LoadExternalWeakUndeclaredIdentifiers()1062 void Sema::LoadExternalWeakUndeclaredIdentifiers() {
1063   if (!ExternalSource)
1064     return;
1065 
1066   SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
1067   ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
1068   for (auto &WeakID : WeakIDs)
1069     (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
1070 }
1071 
1072 
/// Memoization map from a record to a previously computed completeness
/// result, used by the two helpers below.
typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
1074 
1075 /// Returns true, if all methods and nested classes of the given
1076 /// CXXRecordDecl are defined in this translation unit.
1077 ///
1078 /// Should only be called from ActOnEndOfTranslationUnit so that all
1079 /// definitions are actually read.
MethodsAndNestedClassesComplete(const CXXRecordDecl * RD,RecordCompleteMap & MNCComplete)1080 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
1081                                             RecordCompleteMap &MNCComplete) {
1082   RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
1083   if (Cache != MNCComplete.end())
1084     return Cache->second;
1085   if (!RD->isCompleteDefinition())
1086     return false;
1087   bool Complete = true;
1088   for (DeclContext::decl_iterator I = RD->decls_begin(),
1089                                   E = RD->decls_end();
1090        I != E && Complete; ++I) {
1091     if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
1092       Complete = M->isDefined() || M->isDefaulted() ||
1093                  (M->isPureVirtual() && !isa<CXXDestructorDecl>(M));
1094     else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
1095       // If the template function is marked as late template parsed at this
1096       // point, it has not been instantiated and therefore we have not
1097       // performed semantic analysis on it yet, so we cannot know if the type
1098       // can be considered complete.
1099       Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
1100                   F->getTemplatedDecl()->isDefined();
1101     else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) {
1102       if (R->isInjectedClassName())
1103         continue;
1104       if (R->hasDefinition())
1105         Complete = MethodsAndNestedClassesComplete(R->getDefinition(),
1106                                                    MNCComplete);
1107       else
1108         Complete = false;
1109     }
1110   }
1111   MNCComplete[RD] = Complete;
1112   return Complete;
1113 }
1114 
1115 /// Returns true, if the given CXXRecordDecl is fully defined in this
1116 /// translation unit, i.e. all methods are defined or pure virtual and all
1117 /// friends, friend functions and nested classes are fully defined in this
1118 /// translation unit.
1119 ///
1120 /// Should only be called from ActOnEndOfTranslationUnit so that all
1121 /// definitions are actually read.
IsRecordFullyDefined(const CXXRecordDecl * RD,RecordCompleteMap & RecordsComplete,RecordCompleteMap & MNCComplete)1122 static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
1123                                  RecordCompleteMap &RecordsComplete,
1124                                  RecordCompleteMap &MNCComplete) {
1125   RecordCompleteMap::iterator Cache = RecordsComplete.find(RD);
1126   if (Cache != RecordsComplete.end())
1127     return Cache->second;
1128   bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
1129   for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
1130                                       E = RD->friend_end();
1131        I != E && Complete; ++I) {
1132     // Check if friend classes and methods are complete.
1133     if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
1134       // Friend classes are available as the TypeSourceInfo of the FriendDecl.
1135       if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
1136         Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
1137       else
1138         Complete = false;
1139     } else {
1140       // Friend functions are available through the NamedDecl of FriendDecl.
1141       if (const FunctionDecl *FD =
1142           dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
1143         Complete = FD->isDefined();
1144       else
1145         // This is a template friend, give up.
1146         Complete = false;
1147     }
1148   }
1149   RecordsComplete[RD] = Complete;
1150   return Complete;
1151 }
1152 
emitAndClearUnusedLocalTypedefWarnings()1153 void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1154   if (ExternalSource)
1155     ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1156         UnusedLocalTypedefNameCandidates);
1157   for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1158     if (TD->isReferenced())
1159       continue;
1160     Diag(TD->getLocation(), diag::warn_unused_local_typedef)
1161         << isa<TypeAliasDecl>(TD) << TD->getDeclName();
1162   }
1163   UnusedLocalTypedefNameCandidates.clear();
1164 }
1165 
ActOnStartOfTranslationUnit()1166 void Sema::ActOnStartOfTranslationUnit() {
1167   if (getLangOpts().CPlusPlusModules &&
1168       getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1169     HandleStartOfHeaderUnit();
1170 }
1171 
/// Finish semantic analysis for one fragment of the translation unit:
/// flush late-parsed template instantiations into the pending list, define
/// used vtables, and perform all pending implicit instantiations.
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  if (Kind == TUFragmentKind::Global) {
    // Perform Pending Instantiations at the end of global module fragment so
    // that the module ownership of TU-level decls won't get messed.
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
    return;
  }

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(PendingInstantiations.end(),
                               LateParsedInstantiations.begin(),
                               LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefinedUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    // Mark each externally pending function so it is known to still need
    // instantiation, then queue them ahead of the locally recorded ones.
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(PendingInstantiations.begin(),
                                 Pending.begin(), Pending.end());
  }

  {
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  // Flush diagnostics that were deferred until instantiation decisions
  // were known (e.g. device-side diagnostics).
  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");
}
1231 
ActOnEndOfTranslationUnit()1232 void Sema::ActOnEndOfTranslationUnit() {
1233   assert(DelayedDiagnostics.getCurrentPool() == nullptr
1234          && "reached end of translation unit with a pool attached?");
1235 
1236   // If code completion is enabled, don't perform any end-of-translation-unit
1237   // work.
1238   if (PP.isCodeCompletionEnabled())
1239     return;
1240 
1241   // Complete translation units and modules define vtables and perform implicit
1242   // instantiations. PCH files do not.
1243   if (TUKind != TU_Prefix) {
1244     ObjC().DiagnoseUseOfUnimplementedSelectors();
1245 
1246     ActOnEndOfTranslationUnitFragment(
1247         !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1248                                      Module::PrivateModuleFragment
1249             ? TUFragmentKind::Private
1250             : TUFragmentKind::Normal);
1251 
1252     if (LateTemplateParserCleanup)
1253       LateTemplateParserCleanup(OpaqueParser);
1254 
1255     CheckDelayedMemberExceptionSpecs();
1256   } else {
1257     // If we are building a TU prefix for serialization, it is safe to transfer
1258     // these over, even though they are not parsed. The end of the TU should be
1259     // outside of any eager template instantiation scope, so when this AST is
1260     // deserialized, these templates will not be parsed until the end of the
1261     // combined TU.
1262     PendingInstantiations.insert(PendingInstantiations.end(),
1263                                  LateParsedInstantiations.begin(),
1264                                  LateParsedInstantiations.end());
1265     LateParsedInstantiations.clear();
1266 
1267     if (LangOpts.PCHInstantiateTemplates) {
1268       llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
1269       PerformPendingInstantiations();
1270     }
1271   }
1272 
1273   DiagnoseUnterminatedPragmaAlignPack();
1274   DiagnoseUnterminatedPragmaAttribute();
1275   OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget();
1276   DiagnosePrecisionLossInComplexDivision();
1277 
1278   // All delayed member exception specs should be checked or we end up accepting
1279   // incompatible declarations.
1280   assert(DelayedOverridingExceptionSpecChecks.empty());
1281   assert(DelayedEquivalentExceptionSpecChecks.empty());
1282 
1283   // All dllexport classes should have been processed already.
1284   assert(DelayedDllExportClasses.empty());
1285   assert(DelayedDllExportMemberFunctions.empty());
1286 
1287   // Remove file scoped decls that turned out to be used.
1288   UnusedFileScopedDecls.erase(
1289       std::remove_if(UnusedFileScopedDecls.begin(nullptr, true),
1290                      UnusedFileScopedDecls.end(),
1291                      [this](const DeclaratorDecl *DD) {
1292                        return ShouldRemoveFromUnused(this, DD);
1293                      }),
1294       UnusedFileScopedDecls.end());
1295 
1296   if (TUKind == TU_Prefix) {
1297     // Translation unit prefixes don't need any of the checking below.
1298     if (!PP.isIncrementalProcessingEnabled())
1299       TUScope = nullptr;
1300     return;
1301   }
1302 
1303   // Check for #pragma weak identifiers that were never declared
1304   LoadExternalWeakUndeclaredIdentifiers();
1305   for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
1306     if (WeakIDs.second.empty())
1307       continue;
1308 
1309     Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
1310                                       LookupOrdinaryName);
1311     if (PrevDecl != nullptr &&
1312         !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
1313       for (const auto &WI : WeakIDs.second)
1314         Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
1315             << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
1316     else
1317       for (const auto &WI : WeakIDs.second)
1318         Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
1319             << WeakIDs.first;
1320   }
1321 
1322   if (LangOpts.CPlusPlus11 &&
1323       !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
1324     CheckDelegatingCtorCycles();
1325 
1326   if (!Diags.hasErrorOccurred()) {
1327     if (ExternalSource)
1328       ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
1329     checkUndefinedButUsed(*this);
1330   }
1331 
1332   // A global-module-fragment is only permitted within a module unit.
1333   if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
1334                                    Module::ExplicitGlobalModuleFragment) {
1335     Diag(ModuleScopes.back().BeginLoc,
1336          diag::err_module_declaration_missing_after_global_module_introducer);
1337   } else if (getLangOpts().getCompilingModule() ==
1338                  LangOptions::CMK_ModuleInterface &&
1339              // We can't use ModuleScopes here since ModuleScopes is always
1340              // empty if we're compiling the BMI.
1341              !getASTContext().getCurrentNamedModule()) {
1342     // If we are building a module interface unit, we should have seen the
1343     // module declaration.
1344     //
1345     // FIXME: Make a better guess as to where to put the module declaration.
1346     Diag(getSourceManager().getLocForStartOfFile(
1347              getSourceManager().getMainFileID()),
1348          diag::err_module_declaration_missing);
1349   }
1350 
1351   // Now we can decide whether the modules we're building need an initializer.
1352   if (Module *CurrentModule = getCurrentModule();
1353       CurrentModule && CurrentModule->isInterfaceOrPartition()) {
1354     auto DoesModNeedInit = [this](Module *M) {
1355       if (!getASTContext().getModuleInitializers(M).empty())
1356         return true;
1357       for (auto [Exported, _] : M->Exports)
1358         if (Exported->isNamedModuleInterfaceHasInit())
1359           return true;
1360       for (Module *I : M->Imports)
1361         if (I->isNamedModuleInterfaceHasInit())
1362           return true;
1363 
1364       return false;
1365     };
1366 
1367     CurrentModule->NamedModuleHasInit =
1368         DoesModNeedInit(CurrentModule) ||
1369         llvm::any_of(CurrentModule->submodules(), DoesModNeedInit);
1370   }
1371 
1372   if (TUKind == TU_ClangModule) {
1373     // If we are building a module, resolve all of the exported declarations
1374     // now.
1375     if (Module *CurrentModule = PP.getCurrentModule()) {
1376       ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
1377 
1378       SmallVector<Module *, 2> Stack;
1379       Stack.push_back(CurrentModule);
1380       while (!Stack.empty()) {
1381         Module *Mod = Stack.pop_back_val();
1382 
1383         // Resolve the exported declarations and conflicts.
1384         // FIXME: Actually complain, once we figure out how to teach the
1385         // diagnostic client to deal with complaints in the module map at this
1386         // point.
1387         ModMap.resolveExports(Mod, /*Complain=*/false);
1388         ModMap.resolveUses(Mod, /*Complain=*/false);
1389         ModMap.resolveConflicts(Mod, /*Complain=*/false);
1390 
1391         // Queue the submodules, so their exports will also be resolved.
1392         auto SubmodulesRange = Mod->submodules();
1393         Stack.append(SubmodulesRange.begin(), SubmodulesRange.end());
1394       }
1395     }
1396 
1397     // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
1398     // modules when they are built, not every time they are used.
1399     emitAndClearUnusedLocalTypedefWarnings();
1400   }
1401 
1402   // C++ standard modules. Diagnose cases where a function is declared inline
1403   // in the module purview but has no definition before the end of the TU or
1404   // the start of a Private Module Fragment (if one is present).
1405   if (!PendingInlineFuncDecls.empty()) {
1406     for (auto *D : PendingInlineFuncDecls) {
1407       if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1408         bool DefInPMF = false;
1409         if (auto *FDD = FD->getDefinition()) {
1410           DefInPMF = FDD->getOwningModule()->isPrivateModule();
1411           if (!DefInPMF)
1412             continue;
1413         }
1414         Diag(FD->getLocation(), diag::err_export_inline_not_defined)
1415             << DefInPMF;
1416         // If we have a PMF it should be at the end of the ModuleScopes.
1417         if (DefInPMF &&
1418             ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
1419           Diag(ModuleScopes.back().BeginLoc,
1420                diag::note_private_module_fragment);
1421         }
1422       }
1423     }
1424     PendingInlineFuncDecls.clear();
1425   }
1426 
1427   // C99 6.9.2p2:
1428   //   A declaration of an identifier for an object that has file
1429   //   scope without an initializer, and without a storage-class
1430   //   specifier or with the storage-class specifier static,
1431   //   constitutes a tentative definition. If a translation unit
1432   //   contains one or more tentative definitions for an identifier,
1433   //   and the translation unit contains no external definition for
1434   //   that identifier, then the behavior is exactly as if the
1435   //   translation unit contains a file scope declaration of that
1436   //   identifier, with the composite type as of the end of the
1437   //   translation unit, with an initializer equal to 0.
1438   llvm::SmallSet<VarDecl *, 32> Seen;
1439   for (TentativeDefinitionsType::iterator
1440            T = TentativeDefinitions.begin(ExternalSource.get()),
1441            TEnd = TentativeDefinitions.end();
1442        T != TEnd; ++T) {
1443     VarDecl *VD = (*T)->getActingDefinition();
1444 
1445     // If the tentative definition was completed, getActingDefinition() returns
1446     // null. If we've already seen this variable before, insert()'s second
1447     // return value is false.
1448     if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second)
1449       continue;
1450 
1451     if (const IncompleteArrayType *ArrayT
1452         = Context.getAsIncompleteArrayType(VD->getType())) {
1453       // Set the length of the array to 1 (C99 6.9.2p5).
1454       Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
1455       llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
1456       QualType T = Context.getConstantArrayType(
1457           ArrayT->getElementType(), One, nullptr, ArraySizeModifier::Normal, 0);
1458       VD->setType(T);
1459     } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
1460                                    diag::err_tentative_def_incomplete_type))
1461       VD->setInvalidDecl();
1462 
1463     // No initialization is performed for a tentative definition.
1464     CheckCompleteVariableDeclaration(VD);
1465 
1466     // In C, if the definition is const-qualified and has no initializer, it
1467     // is left uninitialized unless it has static or thread storage duration.
1468     QualType Type = VD->getType();
1469     if (!VD->isInvalidDecl() && !getLangOpts().CPlusPlus &&
1470         Type.isConstQualified() && !VD->getAnyInitializer()) {
1471       unsigned DiagID = diag::warn_default_init_const_unsafe;
1472       if (VD->getStorageDuration() == SD_Static ||
1473           VD->getStorageDuration() == SD_Thread)
1474         DiagID = diag::warn_default_init_const;
1475 
1476       bool EmitCppCompat = !Diags.isIgnored(
1477           diag::warn_cxx_compat_hack_fake_diagnostic_do_not_emit,
1478           VD->getLocation());
1479 
1480       Diag(VD->getLocation(), DiagID) << Type << EmitCppCompat;
1481     }
1482 
1483     // Notify the consumer that we've completed a tentative definition.
1484     if (!VD->isInvalidDecl())
1485       Consumer.CompleteTentativeDefinition(VD);
1486   }
1487 
1488   for (auto *D : ExternalDeclarations) {
1489     if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
1490       continue;
1491 
1492     Consumer.CompleteExternalDeclaration(D);
1493   }
1494 
1495   if (LangOpts.HLSL)
1496     HLSL().ActOnEndOfTranslationUnit(getASTContext().getTranslationUnitDecl());
1497 
1498   // If there were errors, disable 'unused' warnings since they will mostly be
1499   // noise. Don't warn for a use from a module: either we should warn on all
1500   // file-scope declarations in modules or not at all, but whether the
1501   // declaration is used is immaterial.
1502   if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) {
1503     // Output warning for unused file scoped decls.
1504     for (UnusedFileScopedDeclsType::iterator
1505              I = UnusedFileScopedDecls.begin(ExternalSource.get()),
1506              E = UnusedFileScopedDecls.end();
1507          I != E; ++I) {
1508       if (ShouldRemoveFromUnused(this, *I))
1509         continue;
1510 
1511       if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
1512         const FunctionDecl *DiagD;
1513         if (!FD->hasBody(DiagD))
1514           DiagD = FD;
1515         if (DiagD->isDeleted())
1516           continue; // Deleted functions are supposed to be unused.
1517         SourceRange DiagRange = DiagD->getLocation();
1518         if (const ASTTemplateArgumentListInfo *ASTTAL =
1519                 DiagD->getTemplateSpecializationArgsAsWritten())
1520           DiagRange.setEnd(ASTTAL->RAngleLoc);
1521         if (DiagD->isReferenced()) {
1522           if (isa<CXXMethodDecl>(DiagD))
1523             Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
1524                 << DiagD << DiagRange;
1525           else {
1526             if (FD->getStorageClass() == SC_Static &&
1527                 !FD->isInlineSpecified() &&
1528                 !SourceMgr.isInMainFile(
1529                    SourceMgr.getExpansionLoc(FD->getLocation())))
1530               Diag(DiagD->getLocation(),
1531                    diag::warn_unneeded_static_internal_decl)
1532                   << DiagD << DiagRange;
1533             else
1534               Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1535                   << /*function=*/0 << DiagD << DiagRange;
1536           }
1537         } else if (!FD->isTargetMultiVersion() ||
1538                    FD->isTargetMultiVersionDefault()) {
1539           if (FD->getDescribedFunctionTemplate())
1540             Diag(DiagD->getLocation(), diag::warn_unused_template)
1541                 << /*function=*/0 << DiagD << DiagRange;
1542           else
1543             Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
1544                                            ? diag::warn_unused_member_function
1545                                            : diag::warn_unused_function)
1546                 << DiagD << DiagRange;
1547         }
1548       } else {
1549         const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
1550         if (!DiagD)
1551           DiagD = cast<VarDecl>(*I);
1552         SourceRange DiagRange = DiagD->getLocation();
1553         if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
1554           if (const ASTTemplateArgumentListInfo *ASTTAL =
1555                   VTSD->getTemplateArgsAsWritten())
1556             DiagRange.setEnd(ASTTAL->RAngleLoc);
1557         }
1558         if (DiagD->isReferenced()) {
1559           Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
1560               << /*variable=*/1 << DiagD << DiagRange;
1561         } else if (DiagD->getDescribedVarTemplate()) {
1562           Diag(DiagD->getLocation(), diag::warn_unused_template)
1563               << /*variable=*/1 << DiagD << DiagRange;
1564         } else if (DiagD->getType().isConstQualified()) {
1565           const SourceManager &SM = SourceMgr;
1566           if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
1567               !PP.getLangOpts().IsHeaderFile)
1568             Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
1569                 << DiagD << DiagRange;
1570         } else {
1571           Diag(DiagD->getLocation(), diag::warn_unused_variable)
1572               << DiagD << DiagRange;
1573         }
1574       }
1575     }
1576 
1577     emitAndClearUnusedLocalTypedefWarnings();
1578   }
1579 
1580   if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
1581     // FIXME: Load additional unused private field candidates from the external
1582     // source.
1583     RecordCompleteMap RecordsComplete;
1584     RecordCompleteMap MNCComplete;
1585     for (const NamedDecl *D : UnusedPrivateFields) {
1586       const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
1587       if (RD && !RD->isUnion() &&
1588           IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
1589         Diag(D->getLocation(), diag::warn_unused_private_field)
1590               << D->getDeclName();
1591       }
1592     }
1593   }
1594 
1595   if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
1596     if (ExternalSource)
1597       ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
1598     for (const auto &DeletedFieldInfo : DeleteExprs) {
1599       for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
1600         AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first,
1601                                   DeleteExprLoc.second);
1602       }
1603     }
1604   }
1605 
1606   AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl());
1607 
1608   if (Context.hasAnyFunctionEffects())
1609     performFunctionEffectAnalysis(Context.getTranslationUnitDecl());
1610 
1611   // Check we've noticed that we're no longer parsing the initializer for every
1612   // variable. If we miss cases, then at best we have a performance issue and
1613   // at worst a rejects-valid bug.
1614   assert(ParsingInitForAutoVars.empty() &&
1615          "Didn't unmark var as having its initializer parsed");
1616 
1617   if (!PP.isIncrementalProcessingEnabled())
1618     TUScope = nullptr;
1619 }
1620 
1621 
1622 //===----------------------------------------------------------------------===//
1623 // Helper functions.
1624 //===----------------------------------------------------------------------===//
1625 
getFunctionLevelDeclContext(bool AllowLambda) const1626 DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1627   DeclContext *DC = CurContext;
1628 
1629   while (true) {
1630     if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
1631         isa<RequiresExprBodyDecl>(DC)) {
1632       DC = DC->getParent();
1633     } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
1634                cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
1635                cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
1636       DC = DC->getParent()->getParent();
1637     } else break;
1638   }
1639 
1640   return DC;
1641 }
1642 
1643 /// getCurFunctionDecl - If inside of a function body, this returns a pointer
1644 /// to the function decl for the function being parsed.  If we're currently
1645 /// in a 'block', this returns the containing context.
getCurFunctionDecl(bool AllowLambda) const1646 FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1647   DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1648   return dyn_cast<FunctionDecl>(DC);
1649 }
1650 
getCurMethodDecl()1651 ObjCMethodDecl *Sema::getCurMethodDecl() {
1652   DeclContext *DC = getFunctionLevelDeclContext();
1653   while (isa<RecordDecl>(DC))
1654     DC = DC->getParent();
1655   return dyn_cast<ObjCMethodDecl>(DC);
1656 }
1657 
getCurFunctionOrMethodDecl() const1658 NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1659   DeclContext *DC = getFunctionLevelDeclContext();
1660   if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
1661     return cast<NamedDecl>(DC);
1662   return nullptr;
1663 }
1664 
getDefaultCXXMethodAddrSpace() const1665 LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1666   if (getLangOpts().OpenCL)
1667     return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1668   return LangAS::Default;
1669 }
1670 
// Emit a diagnostic through Sema, applying SFINAE handling first: when we
// are in a SFINAE context, a diagnostic may be reported normally, recorded
// as a substitution failure, treated as an access-control SFINAE failure, or
// captured/suppressed, depending on its SFINAE category.
void Sema::EmitDiagnostic(unsigned DiagID, const DiagnosticBuilder &DB) {
  // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
  // and yet we also use the current diag ID on the DiagnosticsEngine. This has
  // been made more painfully obvious by the refactor that introduced this
  // function, but it is possible that the incoming argument can be
  // eliminated. If it truly cannot be (for example, there is some reentrancy
  // issue I am not seeing yet), then there should at least be a clarifying
  // comment somewhere.
  Diagnostic DiagInfo(&Diags, DB);
  if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
    switch (DiagnosticIDs::getDiagnosticSFINAEResponse(DiagInfo.getID())) {
    case DiagnosticIDs::SFINAE_Report:
      // We'll report the diagnostic below.
      break;

    case DiagnosticIDs::SFINAE_SubstitutionFailure:
      // Count this failure so that we know that template argument deduction
      // has failed.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
                       PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      // The diagnostic is swallowed here; deduction failure is reported by
      // the caller of the deduction instead.
      Diags.setLastDiagnosticIgnored(true);
      return;

    case DiagnosticIDs::SFINAE_AccessControl: {
      // Per C++ Core Issue 1170, access control is part of SFINAE.
      // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
      // make access control a part of SFINAE for the purposes of checking
      // type traits.
      if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
        break;

      // Save the location before the diagnostic is suppressed so we can
      // still point the compatibility warning at it below.
      SourceLocation Loc = DiagInfo.getLocation();

      // Suppress this diagnostic.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
                       PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      Diags.setLastDiagnosticIgnored(true);

      // Now produce a C++98 compatibility warning.
      Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);

      // The last diagnostic which Sema produced was ignored. Suppress any
      // notes attached to it.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }

    case DiagnosticIDs::SFINAE_Suppress:
      // If the diagnostic is completely disabled at this location there is
      // nothing to record at all.
      if (DiagnosticsEngine::Level Level = getDiagnostics().getDiagnosticLevel(
              DiagInfo.getID(), DiagInfo.getLocation());
          Level == DiagnosticsEngine::Ignored)
        return;
      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information;
      if (*Info) {
        (*Info)->addSuppressedDiagnostic(
            DiagInfo.getLocation(),
            PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
        // Also capture the instantiation backtrace as suppressed notes so it
        // can be replayed if the diagnostic is ever surfaced.
        if (!Diags.getDiagnosticIDs()->isNote(DiagID))
          PrintContextStack([Info](SourceLocation Loc, PartialDiagnostic PD) {
            (*Info)->addSuppressedDiagnostic(Loc, std::move(PD));
          });
      }

      // Suppress this diagnostic.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }
  }

  // Copy the diagnostic printing policy over the ASTContext printing policy.
  // TODO: Stop doing that.  See: https://reviews.llvm.org/D45093#1090292
  Context.setPrintingPolicy(getPrintingPolicy());

  // Emit the diagnostic.
  if (!Diags.EmitDiagnostic(DB))
    return;

  // If this is not a note, and we're in a template instantiation
  // that is different from the last template instantiation where
  // we emitted an error, print a template instantiation
  // backtrace.
  if (!Diags.getDiagnosticIDs()->isNote(DiagID))
    PrintContextStack();
}
1770 
hasUncompilableErrorOccurred() const1771 bool Sema::hasUncompilableErrorOccurred() const {
1772   if (getDiagnostics().hasUncompilableErrorOccurred())
1773     return true;
1774   auto *FD = dyn_cast<FunctionDecl>(CurContext);
1775   if (!FD)
1776     return false;
1777   auto Loc = DeviceDeferredDiags.find(FD);
1778   if (Loc == DeviceDeferredDiags.end())
1779     return false;
1780   for (auto PDAt : Loc->second) {
1781     if (Diags.getDiagnosticIDs()->isDefaultMappingAsError(
1782             PDAt.second.getDiagID()))
1783       return true;
1784   }
1785   return false;
1786 }
1787 
1788 // Print notes showing how we can reach FD starting from an a priori
1789 // known-callable function.
emitCallStackNotes(Sema & S,const FunctionDecl * FD)1790 static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1791   auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(FD);
1792   while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) {
1793     // Respect error limit.
1794     if (S.Diags.hasFatalErrorOccurred())
1795       return;
1796     DiagnosticBuilder Builder(
1797         S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1798     Builder << FnIt->second.FD;
1799     FnIt = S.CUDA().DeviceKnownEmittedFns.find(FnIt->second.FD);
1800   }
1801 }
1802 
namespace {

/// Helper class that emits deferred diagnostic messages if an entity directly
/// or indirectly using the function that causes the deferred diagnostic
/// messages is known to be emitted.
///
/// During parsing of AST, certain diagnostic messages are recorded as deferred
/// diagnostics since it is unknown whether the functions containing such
/// diagnostics will be emitted. A list of potentially emitted functions and
/// variables that may potentially trigger emission of functions are also
/// recorded. DeferredDiagnosticsEmitter recursively visits used functions
/// by each function to emit deferred diagnostics.
///
/// During the visit, certain OpenMP directives or initializer of variables
/// with certain OpenMP attributes will cause subsequent visiting of any
/// functions enter a state which is called OpenMP device context in this
/// implementation. The state is exited when the directive or initializer is
/// exited. This state can change the emission states of subsequent uses
/// of functions.
///
/// Conceptually the functions or variables to be visited form a use graph
/// where the parent node uses the child node. At any point of the visit,
/// the tree nodes traversed from the tree root to the current node form a use
/// stack. The emission state of the current node depends on two factors:
///    1. the emission state of the root node
///    2. whether the current node is in OpenMP device context
/// If the function is decided to be emitted, its contained deferred diagnostics
/// are emitted, together with the information about the use stack.
///
class DeferredDiagnosticsEmitter
    : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
  typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;

  // Whether the function is already in the current use-path.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;

  // The current use-path.
  llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;

  // Whether the visiting of the function has been done. Done[0] is for the
  // case not in OpenMP device context. Done[1] is for the case in OpenMP
  // device context. We need two sets because diagnostics emission may be
  // different depending on whether it is in OpenMP device context.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0 and each
  // entering of device context increases it by 1 and each exit decreases
  // it by 1. Non-zero value indicates it is currently in device context.
  unsigned InOMPDeviceContext;

  DeferredDiagnosticsEmitter(Sema &S)
      : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}

  // Returning false tells the base visitor not to descend into discarded
  // statements (so their uses do not trigger deferred diagnostics).
  bool shouldVisitDiscardedStmt() const { return false; }

  // Code under a '#pragma omp target' is in OpenMP device context for the
  // duration of the directive.
  void VisitOMPTargetDirective(OMPTargetDirective *Node) {
    ++InOMPDeviceContext;
    Inherited::VisitOMPTargetDirective(Node);
    --InOMPDeviceContext;
  }

  void visitUsedDecl(SourceLocation Loc, Decl *D) {
    // Variables are not visited here; file-scope variable initializers are
    // handled explicitly via checkVar().
    if (isa<VarDecl>(D))
      return;
    if (auto *FD = dyn_cast<FunctionDecl>(D))
      checkFunc(Loc, FD);
    else
      Inherited::visitUsedDecl(Loc, D);
  }

  // Visitor member and parent dtors called by this dtor.
  void VisitCalledDestructors(CXXDestructorDecl *DD) {
    const CXXRecordDecl *RD = DD->getParent();

    // Visit the dtors of all members
    for (const FieldDecl *FD : RD->fields()) {
      QualType FT = FD->getType();
      if (const auto *RT = FT->getAs<RecordType>())
        if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
          if (ClassDecl->hasDefinition())
            if (CXXDestructorDecl *MemberDtor = ClassDecl->getDestructor())
              asImpl().visitUsedDecl(MemberDtor->getLocation(), MemberDtor);
    }

    // Also visit base class dtors
    for (const auto &Base : RD->bases()) {
      QualType BaseType = Base.getType();
      if (const auto *RT = BaseType->getAs<RecordType>())
        if (const auto *BaseDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
          if (BaseDecl->hasDefinition())
            if (CXXDestructorDecl *BaseDtor = BaseDecl->getDestructor())
              asImpl().visitUsedDecl(BaseDtor->getLocation(), BaseDtor);
    }
  }

  void VisitDeclStmt(DeclStmt *DS) {
    // Visit dtors called by variables that need destruction
    for (auto *D : DS->decls())
      if (auto *VD = dyn_cast<VarDecl>(D))
        if (VD->isThisDeclarationADefinition() &&
            VD->needsDestruction(S.Context)) {
          QualType VT = VD->getType();
          if (const auto *RT = VT->getAs<RecordType>())
            if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
              if (ClassDecl->hasDefinition())
                if (CXXDestructorDecl *Dtor = ClassDecl->getDestructor())
                  asImpl().visitUsedDecl(Dtor->getLocation(), Dtor);
        }

    Inherited::VisitDeclStmt(DS);
  }
  // Visit the initializer of a file-scope variable. If the variable is an
  // OpenMP declare-target device variable, its initializer is visited in
  // OpenMP device context.
  void checkVar(VarDecl *VD) {
    assert(VD->isFileVarDecl() &&
           "Should only check file-scope variables");
    if (auto *Init = VD->getInit()) {
      auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
      bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
                             *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
      if (IsDev)
        ++InOMPDeviceContext;
      this->Visit(Init);
      if (IsDev)
        --InOMPDeviceContext;
    }
  }

  // Visit a used function: emit its deferred diagnostics if it is known to
  // be emitted, then recurse into its body (at most once per device-context
  // state, and never into a recursive cycle).
  void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
    auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
    FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
    // Skip roots that won't be emitted (outside OpenMP), functions exempt
    // from host/device checking, and recursion back into the current path.
    if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
        S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
      return;
    // Finalize analysis of OpenMP-specific constructs.
    if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
        (ShouldEmitRootNode || InOMPDeviceContext))
      S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
    // Record who caused FD to be emitted, for call-stack notes.
    if (Caller)
      S.CUDA().DeviceKnownEmittedFns[FD] = {Caller, Loc};
    // Always emit deferred diagnostics for the direct users. This does not
    // lead to explosion of diagnostics since each user is visited at most
    // twice.
    // Note: 'Caller' converts to bool here — show the call stack only when
    // FD was reached through a caller.
    if (ShouldEmitRootNode || InOMPDeviceContext)
      emitDeferredDiags(FD, Caller);
    // Do not revisit a function if the function body has been completely
    // visited before.
    if (!Done.insert(FD).second)
      return;
    InUsePath.insert(FD);
    UsePath.push_back(FD);
    if (auto *S = FD->getBody()) {
      this->Visit(S);
    }
    if (CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD))
      asImpl().VisitCalledDestructors(Dtor);
    UsePath.pop_back();
    InUsePath.erase(FD);
  }

  // Entry point: start a new use-graph walk rooted at a recorded function
  // or file-scope variable.
  void checkRecordedDecl(Decl *D) {
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      // The root's emission state determines whether diagnostics reached
      // from it are emitted (outside of OpenMP device context).
      ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
                           Sema::FunctionEmissionStatus::Emitted;
      checkFunc(SourceLocation(), FD);
    } else
      checkVar(cast<VarDecl>(D));
  }

  // Emit any deferred diagnostics for FD
  void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
    auto It = S.DeviceDeferredDiags.find(FD);
    if (It == S.DeviceDeferredDiags.end())
      return;
    bool HasWarningOrError = false;
    bool FirstDiag = true;
    for (PartialDiagnosticAt &PDAt : It->second) {
      // Respect error limit.
      if (S.Diags.hasFatalErrorOccurred())
        return;
      const SourceLocation &Loc = PDAt.first;
      const PartialDiagnostic &PD = PDAt.second;
      HasWarningOrError |=
          S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
          DiagnosticsEngine::Warning;
      {
        // Scope the builder so the diagnostic is emitted before any
        // call-stack notes below.
        DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
        PD.Emit(Builder);
      }
      // Emit the note on the first diagnostic in case too many diagnostics
      // cause the note not emitted.
      if (FirstDiag && HasWarningOrError && ShowCallStack) {
        emitCallStackNotes(S, FD);
        FirstDiag = false;
      }
    }
  }
};
} // namespace
2004 
emitDeferredDiags()2005 void Sema::emitDeferredDiags() {
2006   if (ExternalSource)
2007     ExternalSource->ReadDeclsToCheckForDeferredDiags(
2008         DeclsToCheckForDeferredDiags);
2009 
2010   if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
2011       DeclsToCheckForDeferredDiags.empty())
2012     return;
2013 
2014   DeferredDiagnosticsEmitter DDE(*this);
2015   for (auto *D : DeclsToCheckForDeferredDiags)
2016     DDE.checkRecordedDecl(D);
2017 }
2018 
2019 // In CUDA, there are some constructs which may appear in semantically-valid
2020 // code, but trigger errors if we ever generate code for the function in which
2021 // they appear.  Essentially every construct you're not allowed to use on the
2022 // device falls into this category, because you are allowed to use these
2023 // constructs in a __host__ __device__ function, but only if that function is
2024 // never codegen'ed on the device.
2025 //
2026 // To handle semantic checking for these constructs, we keep track of the set of
2027 // functions we know will be emitted, either because we could tell a priori that
2028 // they would be emitted, or because they were transitively called by a
2029 // known-emitted function.
2030 //
2031 // We also keep a partial call graph of which not-known-emitted functions call
2032 // which other not-known-emitted functions.
2033 //
2034 // When we see something which is illegal if the current function is emitted
2035 // (usually by way of DiagIfDeviceCode, DiagIfHostCode, or
2036 // CheckCall), we first check if the current function is known-emitted.  If
2037 // so, we immediately output the diagnostic.
2038 //
2039 // Otherwise, we "defer" the diagnostic.  It sits in Sema::DeviceDeferredDiags
2040 // until we discover that the function is known-emitted, at which point we take
2041 // it out of this map and emit the diagnostic.
2042 
SemaDiagnosticBuilder(Kind K,SourceLocation Loc,unsigned DiagID,const FunctionDecl * Fn,Sema & S)2043 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
2044                                                    unsigned DiagID,
2045                                                    const FunctionDecl *Fn,
2046                                                    Sema &S)
2047     : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
2048       ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
2049   switch (K) {
2050   case K_Nop:
2051     break;
2052   case K_Immediate:
2053   case K_ImmediateWithCallStack:
2054     ImmediateDiag.emplace(
2055         ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
2056     break;
2057   case K_Deferred:
2058     assert(Fn && "Must have a function to attach the deferred diag to.");
2059     auto &Diags = S.DeviceDeferredDiags[Fn];
2060     PartialDiagId.emplace(Diags.size());
2061     Diags.emplace_back(Loc, S.PDiag(DiagID));
2062     break;
2063   }
2064 }
2065 
// Move constructor: transfer the pending diagnostic to the new builder and
// disarm the source so its destructor neither emits the diagnostic nor the
// call-stack notes a second time.
// NOTE(review): copying ImmediateDiag before resetting the source appears to
// rely on ImmediateDiagBuilder's copy semantics transferring responsibility
// for emission — confirm against DiagnosticBuilder's ownership rules.
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
    : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
      ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
      PartialDiagId(D.PartialDiagId) {
  // Clean the previous diagnostics.
  D.ShowCallStack = false;
  D.ImmediateDiag.reset();
  D.PartialDiagId.reset();
}
2075 
~SemaDiagnosticBuilder()2076 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
2077   if (ImmediateDiag) {
2078     // Emit our diagnostic and, if it was a warning or error, output a callstack
2079     // if Fn isn't a priori known-emitted.
2080     ImmediateDiag.reset(); // Emit the immediate diag.
2081 
2082     if (ShowCallStack) {
2083       bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
2084                                   DiagID, Loc) >= DiagnosticsEngine::Warning;
2085       if (IsWarningOrError)
2086         emitCallStackNotes(S, Fn);
2087     }
2088   } else {
2089     assert((!PartialDiagId || ShowCallStack) &&
2090            "Must always show call stack for deferred diags.");
2091   }
2092 }
2093 
2094 Sema::SemaDiagnosticBuilder
targetDiag(SourceLocation Loc,unsigned DiagID,const FunctionDecl * FD)2095 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
2096   FD = FD ? FD : getCurFunctionDecl();
2097   if (LangOpts.OpenMP)
2098     return LangOpts.OpenMPIsTargetDevice
2099                ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD)
2100                : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD);
2101   if (getLangOpts().CUDA)
2102     return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID)
2103                                       : CUDA().DiagIfHostCode(Loc, DiagID);
2104 
2105   if (getLangOpts().SYCLIsDevice)
2106     return SYCL().DiagIfDeviceCode(Loc, DiagID);
2107 
2108   return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
2109                                FD, *this);
2110 }
2111 
// Diagnose uses of types the current target cannot represent (e.g. _BitInt,
// __float128, __ibm128, 128-bit integers, mismatched 'long double'
// semantics, FP return values on targets without FP return support, and
// scalable-vector types without the required target features).  In device
// compilations (SYCL / OpenMP target / CUDA) the diagnostics go through
// targetDiag() so they can be deferred until the enclosing function is known
// to be emitted for the device.
//
// \param Ty  the type being used; if it is a function type, each parameter
//            type and the return type are checked individually.
// \param Loc location of the use, for diagnostics.
// \param D   the declaration being referenced, if any; named in the
//            diagnostic and marked invalid when an unsupported type is found.
void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  // Nothing in an unevaluated context is ever emitted, so there is nothing
  // to diagnose.
  if (isUnevaluatedContext() || Ty.isNull())
    return;

  // The original idea behind checkTypeSupport function is that unused
  // declarations can be replaced with an array of bytes of the same size during
  // codegen, such replacement doesn't seem to be possible for types without
  // constant byte size like zero length arrays. So, do a deep check for SYCL.
  if (D && LangOpts.SYCLIsDevice) {
    llvm::DenseSet<QualType> Visited;
    SYCL().deepTypeCheckForDevice(Loc, Visited, D);
  }

  Decl *C = cast<Decl>(getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function, or
  // the value declaration otherwise.
  const FunctionDecl *FD = isa<FunctionDecl>(C)
                               ? cast<FunctionDecl>(C)
                               : dyn_cast_or_null<FunctionDecl>(D);

  // Device-only checks: types that may be unsupported on the offload target
  // even when the host supports them.
  auto CheckDeviceType = [&](QualType Ty) {
    // Dependent types are checked after instantiation.
    if (Ty->isDependentType())
      return;

    if (Ty->isBitIntType()) {
      if (!Context.getTargetInfo().hasBitIntType()) {
        PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
        if (D)
          PD << D;
        else
          PD << "expression";
        targetDiag(Loc, PD, FD)
            << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
            << Ty << Context.getTargetInfo().getTriple().str();
      }
      return;
    }

    // Check if we are dealing with two 'long double' but with different
    // semantics.
    bool LongDoubleMismatched = false;
    if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
      const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
      // A 128-bit float is either IEEE binary128 (needs __float128 support)
      // or PPC double-double (needs __ibm128 support).
      if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasFloat128Type()) ||
          (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasIbm128Type()))
        LongDoubleMismatched = true;
    }

    if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
        (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
        (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
        (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
         !Context.getTargetInfo().hasInt128Type()) ||
        (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
         !LangOpts.CUDAIsDevice) ||
        LongDoubleMismatched) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      // NOTE(review): the stream-argument order here differs from the _BitInt
      // case above (bit size before vs. after Ty) — presumably both match the
      // %select layout of err_target_unsupported_type; verify against the
      // diagnostic definition.
      if (targetDiag(Loc, PD, FD)
          << true /*show bit size*/
          << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
          << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }
  };

  // Host-and-device checks applied to every type we visit.
  auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
    if (LangOpts.SYCLIsDevice ||
        (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
        LangOpts.CUDAIsDevice)
      CheckDeviceType(Ty);

    QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
    const TargetInfo &TI = Context.getTargetInfo();
    if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << false /*return*/
          << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }

    // Some targets can pass float/double arguments but cannot return them.
    bool IsDouble = UnqualTy == Context.DoubleTy;
    bool IsFloat = UnqualTy == Context.FloatTy;
    if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << true /*return*/
          << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }

    // RVV sizeless builtin types require the right RISC-V vector features in
    // the calling function.
    if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(CallerFeatureMap, FD);
      RISCV().checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
    }

    // Don't allow SVE types in functions without a SVE target.
    if (Ty->isSVESizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(CallerFeatureMap, FD);
      if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap)) {
        // With only SME, SVE types are usable solely in streaming functions.
        if (!Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
          Diag(Loc, diag::err_sve_vector_in_non_sve_target) << Ty;
        else if (!IsArmStreamingFunction(FD,
                                         /*IncludeLocallyStreaming=*/true)) {
          Diag(Loc, diag::err_sve_vector_in_non_streaming_function) << Ty;
        }
      }
    }

    // Fixed-length SVE vectors are sized for the non-streaming vector length;
    // reject them in (or for streaming-compatible) functions when the
    // streaming vscale range differs from the non-streaming one.
    if (auto *VT = Ty->getAs<VectorType>();
        VT && FD &&
        (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
         VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) &&
        (LangOpts.VScaleMin != LangOpts.VScaleStreamingMin ||
         LangOpts.VScaleMax != LangOpts.VScaleStreamingMax)) {
      if (IsArmStreamingFunction(FD, /*IncludeLocallyStreaming=*/true)) {
        Diag(Loc, diag::err_sve_fixed_vector_in_streaming_function)
            << Ty << /*Streaming*/ 0;
      } else if (const auto *FTy = FD->getType()->getAs<FunctionProtoType>()) {
        if (FTy->getAArch64SMEAttributes() &
            FunctionType::SME_PStateSMCompatibleMask) {
          Diag(Loc, diag::err_sve_fixed_vector_in_streaming_function)
              << Ty << /*StreamingCompatible*/ 1;
        }
      }
    }
  };

  CheckType(Ty);
  // For function types, check every parameter and the return type too.
  if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
    for (const auto &ParamTy : FPTy->param_types())
      CheckType(ParamTy);
    CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
  }
  if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
    CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}
2293 
findMacroSpelling(SourceLocation & locref,StringRef name)2294 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2295   SourceLocation loc = locref;
2296   if (!loc.isMacroID()) return false;
2297 
2298   // There's no good way right now to look at the intermediate
2299   // expansions, so just jump to the expansion location.
2300   loc = getSourceManager().getExpansionLoc(loc);
2301 
2302   // If that's written with the name, stop here.
2303   SmallString<16> buffer;
2304   if (getPreprocessor().getSpelling(loc, buffer) == name) {
2305     locref = loc;
2306     return true;
2307   }
2308   return false;
2309 }
2310 
getScopeForContext(DeclContext * Ctx)2311 Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2312 
2313   if (!Ctx)
2314     return nullptr;
2315 
2316   Ctx = Ctx->getPrimaryContext();
2317   for (Scope *S = getCurScope(); S; S = S->getParent()) {
2318     // Ignore scopes that cannot have declarations. This is important for
2319     // out-of-line definitions of static class members.
2320     if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2321       if (DeclContext *Entity = S->getEntity())
2322         if (Ctx == Entity->getPrimaryContext())
2323           return S;
2324   }
2325 
2326   return nullptr;
2327 }
2328 
2329 /// Enter a new function scope
PushFunctionScope()2330 void Sema::PushFunctionScope() {
2331   if (FunctionScopes.empty() && CachedFunctionScope) {
2332     // Use CachedFunctionScope to avoid allocating memory when possible.
2333     CachedFunctionScope->Clear();
2334     FunctionScopes.push_back(CachedFunctionScope.release());
2335   } else {
2336     FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
2337   }
2338   if (LangOpts.OpenMP)
2339     OpenMP().pushOpenMPFunctionRegion();
2340 }
2341 
PushBlockScope(Scope * BlockScope,BlockDecl * Block)2342 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2343   FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2344                                               BlockScope, Block));
2345   CapturingFunctionScopes++;
2346 }
2347 
PushLambdaScope()2348 LambdaScopeInfo *Sema::PushLambdaScope() {
2349   LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2350   FunctionScopes.push_back(LSI);
2351   CapturingFunctionScopes++;
2352   return LSI;
2353 }
2354 
RecordParsingTemplateParameterDepth(unsigned Depth)2355 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2356   if (LambdaScopeInfo *const LSI = getCurLambda()) {
2357     LSI->AutoTemplateParameterDepth = Depth;
2358     return;
2359   }
2360   llvm_unreachable(
2361       "Remove assertion if intentionally called in a non-lambda context.");
2362 }
2363 
// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
// This also performs initialization of block variables when they are moved
// to the heap. It uses the same rules as applicable for implicit moves
// according to the C++ standard in effect ([class.copy.elision]p3).
//
// On success, the computed copy-initializer (wrapped in cleanups if needed)
// is stashed on the ASTContext for IRGen via setBlockVarCopyInit.
static void checkEscapingByref(VarDecl *VD, Sema &S) {
  QualType T = VD->getType();
  // The initializer below is potentially evaluated: it will be emitted in the
  // block copy helper.
  EnterExpressionEvaluationContext scope(
      S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
  SourceLocation Loc = VD->getLocation();
  Expr *VarRef =
      new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
  ExprResult Result;
  auto IE = InitializedEntity::InitializeBlock(Loc, T);
  if (S.getLangOpts().CPlusPlus23) {
    // C++23: treat the variable as an xvalue so the move happens via the
    // normal copy-initialization rules.
    auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
                                       VK_XValue, FPOptionsOverride());
    Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
  } else {
    // Pre-C++23: use the named-return-value machinery, marking the variable
    // move-eligible per [class.copy.elision]p3.
    Result = S.PerformMoveOrCopyInitialization(
        IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
        VarRef);
  }

  if (!Result.isInvalid()) {
    Result = S.MaybeCreateExprWithCleanups(Result);
    Expr *Init = Result.getAs<Expr>();
    S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
  }

  // The destructor's exception specification is needed when IRGen generates
  // block copy/destroy functions. Resolve it here.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DD = RD->getDestructor()) {
      auto *FPT = DD->getType()->castAs<FunctionProtoType>();
      S.ResolveExceptionSpec(Loc, FPT);
    }
}
2402 
markEscapingByrefs(const FunctionScopeInfo & FSI,Sema & S)2403 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2404   // Set the EscapingByref flag of __block variables captured by
2405   // escaping blocks.
2406   for (const BlockDecl *BD : FSI.Blocks) {
2407     for (const BlockDecl::Capture &BC : BD->captures()) {
2408       VarDecl *VD = BC.getVariable();
2409       if (VD->hasAttr<BlocksAttr>()) {
2410         // Nothing to do if this is a __block variable captured by a
2411         // non-escaping block.
2412         if (BD->doesNotEscape())
2413           continue;
2414         VD->setEscapingByref();
2415       }
2416       // Check whether the captured variable is or contains an object of
2417       // non-trivial C union type.
2418       QualType CapType = BC.getVariable()->getType();
2419       if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2420           CapType.hasNonTrivialToPrimitiveCopyCUnion())
2421         S.checkNonTrivialCUnion(BC.getVariable()->getType(),
2422                                 BD->getCaretLocation(),
2423                                 NonTrivialCUnionContext::BlockCapture,
2424                                 Sema::NTCUK_Destruct | Sema::NTCUK_Copy);
2425     }
2426   }
2427 
2428   for (VarDecl *VD : FSI.ByrefBlockVars) {
2429     // __block variables might require us to capture a copy-initializer.
2430     if (!VD->isEscapingByref())
2431       continue;
2432     // It's currently invalid to ever have a __block variable with an
2433     // array type; should we diagnose that here?
2434     // Regardless, we don't want to ignore array nesting when
2435     // constructing this copy.
2436     if (VD->getType()->isStructureOrClassType())
2437       checkEscapingByref(VD, S);
2438   }
2439 }
2440 
// Pop the innermost function scope, running the escaping-__block analysis
// over it first and then either the analysis-based warnings (when both a
// policy and a declaration are supplied) or a replay of the scope's
// possibly-unreachable diagnostics.
//
// \param WP analysis-based warning policy, may be null.
// \param D  the declaration whose body was just completed, may be null.
// \param BlockType for block scopes, the type of the block.
// \return owning pointer to the popped scope; its deleter may cache the
//         scope object for reuse by the next PushFunctionScope.
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
                           const Decl *D, QualType BlockType) {
  assert(!FunctionScopes.empty() && "mismatched push/pop!");

  // Must run before the scope is popped so the analysis sees the full
  // block/capture information.
  markEscapingByrefs(*FunctionScopes.back(), *this);

  PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
                               PoppedFunctionScopeDeleter(this));

  if (LangOpts.OpenMP)
    OpenMP().popOpenMPFunctionRegion(Scope.get());

  // Issue any analysis-based warnings.
  if (WP && D) {
    inferNoReturnAttr(*this, D);
    AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
  } else
    // No analysis pass: emit the diagnostics that were held back as
    // possibly unreachable.
    for (const auto &PUD : Scope->PossiblyUnreachableDiags)
      Diag(PUD.Loc, PUD.PD);

  return Scope;
}
2464 
2465 void Sema::PoppedFunctionScopeDeleter::
operator ()(sema::FunctionScopeInfo * Scope) const2466 operator()(sema::FunctionScopeInfo *Scope) const {
2467   if (!Scope->isPlainFunction())
2468     Self->CapturingFunctionScopes--;
2469   // Stash the function scope for later reuse if it's for a normal function.
2470   if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2471     Self->CachedFunctionScope.reset(Scope);
2472   else
2473     delete Scope;
2474 }
2475 
PushCompoundScope(bool IsStmtExpr)2476 void Sema::PushCompoundScope(bool IsStmtExpr) {
2477   getCurFunction()->CompoundScopes.push_back(
2478       CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2479 }
2480 
PopCompoundScope()2481 void Sema::PopCompoundScope() {
2482   FunctionScopeInfo *CurFunction = getCurFunction();
2483   assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2484 
2485   CurFunction->CompoundScopes.pop_back();
2486 }
2487 
hasAnyUnrecoverableErrorsInThisFunction() const2488 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2489   return getCurFunction()->hasUnrecoverableErrorOccurred();
2490 }
2491 
setFunctionHasBranchIntoScope()2492 void Sema::setFunctionHasBranchIntoScope() {
2493   if (!FunctionScopes.empty())
2494     FunctionScopes.back()->setHasBranchIntoScope();
2495 }
2496 
setFunctionHasBranchProtectedScope()2497 void Sema::setFunctionHasBranchProtectedScope() {
2498   if (!FunctionScopes.empty())
2499     FunctionScopes.back()->setHasBranchProtectedScope();
2500 }
2501 
setFunctionHasIndirectGoto()2502 void Sema::setFunctionHasIndirectGoto() {
2503   if (!FunctionScopes.empty())
2504     FunctionScopes.back()->setHasIndirectGoto();
2505 }
2506 
setFunctionHasMustTail()2507 void Sema::setFunctionHasMustTail() {
2508   if (!FunctionScopes.empty())
2509     FunctionScopes.back()->setHasMustTail();
2510 }
2511 
getCurBlock()2512 BlockScopeInfo *Sema::getCurBlock() {
2513   if (FunctionScopes.empty())
2514     return nullptr;
2515 
2516   auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
2517   if (CurBSI && CurBSI->TheDecl &&
2518       !CurBSI->TheDecl->Encloses(CurContext)) {
2519     // We have switched contexts due to template instantiation.
2520     assert(!CodeSynthesisContexts.empty());
2521     return nullptr;
2522   }
2523 
2524   return CurBSI;
2525 }
2526 
getEnclosingFunction() const2527 FunctionScopeInfo *Sema::getEnclosingFunction() const {
2528   if (FunctionScopes.empty())
2529     return nullptr;
2530 
2531   for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2532     if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
2533       continue;
2534     return FunctionScopes[e];
2535   }
2536   return nullptr;
2537 }
2538 
getEnclosingLambdaOrBlock() const2539 CapturingScopeInfo *Sema::getEnclosingLambdaOrBlock() const {
2540   for (auto *Scope : llvm::reverse(FunctionScopes)) {
2541     if (auto *CSI = dyn_cast<CapturingScopeInfo>(Scope)) {
2542       auto *LSI = dyn_cast<LambdaScopeInfo>(CSI);
2543       if (LSI && LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
2544           LSI->AfterParameterList) {
2545         // We have switched contexts due to template instantiation.
2546         // FIXME: We should swap out the FunctionScopes during code synthesis
2547         // so that we don't need to check for this.
2548         assert(!CodeSynthesisContexts.empty());
2549         return nullptr;
2550       }
2551       return CSI;
2552     }
2553   }
2554   return nullptr;
2555 }
2556 
// Return the innermost lambda scope, or null.
// \param IgnoreNonLambdaCapturingScope when true, skip outward over
//        capturing scopes that are not lambdas (e.g. blocks) before looking
//        for the lambda.
LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
  if (FunctionScopes.empty())
    return nullptr;

  auto I = FunctionScopes.rbegin();
  if (IgnoreNonLambdaCapturingScope) {
    auto E = FunctionScopes.rend();
    // Walk outward past capturing scopes that are not themselves lambdas.
    while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
      ++I;
    if (I == E)
      return nullptr;
  }
  auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
  if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
      !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurLSI;
}
2579 
2580 // We have a generic lambda if we parsed auto parameters, or we have
2581 // an associated template parameter list.
getCurGenericLambda()2582 LambdaScopeInfo *Sema::getCurGenericLambda() {
2583   if (LambdaScopeInfo *LSI =  getCurLambda()) {
2584     return (LSI->TemplateParams.size() ||
2585                     LSI->GLTemplateParameterList) ? LSI : nullptr;
2586   }
2587   return nullptr;
2588 }
2589 
2590 
// Called by the parser for each comment token.  Registers the raw comment
// with the ASTContext, and warns (with a fixit) when a comment looks like it
// was intended to be a Doxygen trailing-member comment but is missing the
// '<' marker, or contains a line splice Doxygen cannot handle.
void Sema::ActOnComment(SourceRange Comment) {
  // Skip comments found in system headers unless retention was requested.
  if (!LangOpts.RetainCommentsFromSystemHeaders &&
      SourceMgr.isInSystemHeader(Comment.getBegin()))
    return;
  RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
  if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
    // The range of the opening comment marker, e.g. "///" or "/**".
    SourceRange MagicMarkerRange(Comment.getBegin(),
                                 Comment.getBegin().getLocWithOffset(3));
    StringRef MagicMarkerText;
    switch (RC.getKind()) {
    case RawComment::RCK_OrdinaryBCPL:
      MagicMarkerText = "///<";
      break;
    case RawComment::RCK_OrdinaryC:
      MagicMarkerText = "/**<";
      break;
    case RawComment::RCK_Invalid:
      // FIXME: are there other scenarios that could produce an invalid
      // raw comment here?
      Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
      return;
    default:
      llvm_unreachable("if this is an almost Doxygen comment, "
                       "it should be ordinary");
    }
    Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
      FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
  }
  Context.addComment(RC);
}
2621 
// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
// Identity anchor used for LLVM-style RTTI on external source classes.
char ExternalSemaSource::ID;

// Default implementations of the external-source hooks below do nothing;
// concrete sources (not visible in this file) are expected to override the
// ones they support.
void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }

void ExternalSemaSource::ReadKnownNamespaces(
                           SmallVectorImpl<NamespaceDecl *> &Namespaces) {
}

void ExternalSemaSource::ReadUndefinedButUsed(
    llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}

void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
    FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2638 
// Determine whether \p E could plausibly be called with zero arguments, to
// support "did you mean to call it?" recovery.
//
// \param ZeroArgCallReturnTy [out] the return type of a zero-argument call,
//        or a null QualType if no unambiguous zero-argument candidate exists.
// \param OverloadSet [out] filled with the candidate declarations when E is
//        an overload set.
// \return true if E can be called with no arguments (even if the return type
//         could not be determined unambiguously).
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                         UnresolvedSetImpl &OverloadSet) {
  ZeroArgCallReturnTy = QualType();
  OverloadSet.clear();

  const OverloadExpr *Overloads = nullptr;
  bool IsMemExpr = false;
  if (E.getType() == Context.OverloadTy) {
    OverloadExpr::FindResult FR = OverloadExpr::find(&E);

    // Ignore overloads that are pointer-to-member constants.
    if (FR.HasFormOfMemberPointer)
      return false;

    Overloads = FR.Expression;
  } else if (E.getType() == Context.BoundMemberTy) {
    Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
    IsMemExpr = true;
  }

  bool Ambiguous = false;
  // Whether the zero-arg candidate found so far is a cpu_dispatch/
  // cpu_specific multiversion function (those share a name by design and are
  // not treated as ambiguous with each other).
  bool IsMV = false;

  if (Overloads) {
    for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
         DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
      OverloadSet.addDecl(*it);

      // Check whether the function is a non-template, non-member which takes no
      // arguments.
      if (IsMemExpr)
        continue;
      if (const FunctionDecl *OverloadDecl
            = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
        if (OverloadDecl->getMinRequiredArguments() == 0) {
          // A second zero-arg candidate makes the return type ambiguous,
          // unless both candidates are multiversion variants.
          if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
              (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
                          OverloadDecl->isCPUSpecificMultiVersion()))) {
            ZeroArgCallReturnTy = QualType();
            Ambiguous = true;
          } else {
            ZeroArgCallReturnTy = OverloadDecl->getReturnType();
            IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
                   OverloadDecl->isCPUSpecificMultiVersion();
          }
        }
      }
    }

    // If it's not a member, use better machinery to try to resolve the call
    if (!IsMemExpr)
      return !ZeroArgCallReturnTy.isNull();
  }

  // Attempt to call the member with no arguments - this will correctly handle
  // member templates with defaults/deduction of template arguments, overloads
  // with default arguments, etc.
  if (IsMemExpr && !E.isTypeDependent()) {
    // Suppress diagnostics from the tentative call build.
    Sema::TentativeAnalysisScope Trap(*this);
    ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), {},
                                             SourceLocation());
    if (R.isUsable()) {
      ZeroArgCallReturnTy = R.get()->getType();
      return true;
    }
    return false;
  }

  // A direct reference to a function: callable with zero args iff it has no
  // required parameters.
  if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
    if (const auto *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
      if (Fun->getMinRequiredArguments() == 0)
        ZeroArgCallReturnTy = Fun->getReturnType();
      return true;
    }
  }

  // We don't have an expression that's convenient to get a FunctionDecl from,
  // but we can at least check if the type is "function of 0 arguments".
  QualType ExprTy = E.getType();
  const FunctionType *FunTy = nullptr;
  QualType PointeeTy = ExprTy->getPointeeType();
  if (!PointeeTy.isNull())
    FunTy = PointeeTy->getAs<FunctionType>();
  if (!FunTy)
    FunTy = ExprTy->getAs<FunctionType>();

  if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(FunTy)) {
    if (FPT->getNumParams() == 0)
      ZeroArgCallReturnTy = FunTy->getReturnType();
    return true;
  }
  return false;
}
2732 
2733 /// Give notes for a set of overloads.
2734 ///
2735 /// A companion to tryExprAsCall. In cases when the name that the programmer
2736 /// wrote was an overloaded function, we may be able to make some guesses about
2737 /// plausible overloads based on their return types; such guesses can be handed
2738 /// off to this method to be emitted as notes.
2739 ///
2740 /// \param Overloads - The overloads to note.
2741 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2742 ///  -fshow-overloads=best, this is the location to attach to the note about too
2743 ///  many candidates. Typically this will be the location of the original
2744 ///  ill-formed expression.
noteOverloads(Sema & S,const UnresolvedSetImpl & Overloads,const SourceLocation FinalNoteLoc)2745 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2746                           const SourceLocation FinalNoteLoc) {
2747   unsigned ShownOverloads = 0;
2748   unsigned SuppressedOverloads = 0;
2749   for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2750        DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2751     if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2752       ++SuppressedOverloads;
2753       continue;
2754     }
2755 
2756     const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2757     // Don't print overloads for non-default multiversioned functions.
2758     if (const auto *FD = Fn->getAsFunction()) {
2759       if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2760           !FD->getAttr<TargetAttr>()->isDefaultVersion())
2761         continue;
2762       if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2763           !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2764         continue;
2765     }
2766     S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2767     ++ShownOverloads;
2768   }
2769 
2770   S.Diags.overloadCandidatesShown(ShownOverloads);
2771 
2772   if (SuppressedOverloads)
2773     S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2774       << SuppressedOverloads;
2775 }
2776 
notePlausibleOverloads(Sema & S,SourceLocation Loc,const UnresolvedSetImpl & Overloads,bool (* IsPlausibleResult)(QualType))2777 static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2778                                    const UnresolvedSetImpl &Overloads,
2779                                    bool (*IsPlausibleResult)(QualType)) {
2780   if (!IsPlausibleResult)
2781     return noteOverloads(S, Overloads, Loc);
2782 
2783   UnresolvedSet<2> PlausibleOverloads;
2784   for (OverloadExpr::decls_iterator It = Overloads.begin(),
2785          DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2786     const auto *OverloadDecl = cast<FunctionDecl>(*It);
2787     QualType OverloadResultTy = OverloadDecl->getReturnType();
2788     if (IsPlausibleResult(OverloadResultTy))
2789       PlausibleOverloads.addDecl(It.getDecl());
2790   }
2791   noteOverloads(S, PlausibleOverloads, Loc);
2792 }
2793 
2794 /// Determine whether the given expression can be called by just
2795 /// putting parentheses after it.  Notably, expressions with unary
2796 /// operators can't be because the unary operator will start parsing
2797 /// outside the call.
IsCallableWithAppend(const Expr * E)2798 static bool IsCallableWithAppend(const Expr *E) {
2799   E = E->IgnoreImplicit();
2800   return (!isa<CStyleCastExpr>(E) &&
2801           !isa<UnaryOperator>(E) &&
2802           !isa<BinaryOperator>(E) &&
2803           !isa<CXXOperatorCallExpr>(E));
2804 }
2805 
IsCPUDispatchCPUSpecificMultiVersion(const Expr * E)2806 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2807   if (const auto *UO = dyn_cast<UnaryOperator>(E))
2808     E = UO->getSubExpr();
2809 
2810   if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
2811     if (ULE->getNumDecls() == 0)
2812       return false;
2813 
2814     const NamedDecl *ND = *ULE->decls_begin();
2815     if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2816       return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2817   }
2818   return false;
2819 }
2820 
/// Try to recover from an expression that should have been a call by
/// pretending the user wrote a zero-argument call: emit \p PD with a "()"
/// fixit when that call would be valid (and plausible per
/// \p IsPlausibleResult), rebuild E as the call, and return true.  When
/// recovery is impossible, complain only if \p ForceComplain is set, in
/// which case E becomes ExprError() and true is returned; otherwise E is
/// untouched and false is returned.
bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                                bool ForceComplain,
                                bool (*IsPlausibleResult)(QualType)) {
  SourceLocation Loc = E.get()->getExprLoc();
  SourceRange Range = E.get()->getSourceRange();
  UnresolvedSet<4> Overloads;

  // If this is a SFINAE context, don't try anything that might trigger ADL
  // prematurely.
  if (!isSFINAEContext()) {
    QualType ZeroArgCallTy;
    if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
        !ZeroArgCallTy.isNull() &&
        (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
      // At this point, we know E is potentially callable with 0
      // arguments and that it returns something of a reasonable type,
      // so we can emit a fixit and carry on pretending that E was
      // actually a CallExpr.
      SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
      bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
      // Only suggest appending "()" when that would actually parse as a call
      // (see IsCallableWithAppend); otherwise emit the diagnostic alone.
      Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
                    << (IsCallableWithAppend(E.get())
                            ? FixItHint::CreateInsertion(ParenInsertionLoc,
                                                         "()")
                            : FixItHint());
      // Multiversion (cpu_dispatch/cpu_specific) functions skip the
      // per-overload notes.
      if (!IsMV)
        notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);

      // FIXME: Try this before emitting the fixit, and suppress diagnostics
      // while doing so.
      E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), {},
                        Range.getEnd().getLocWithOffset(1));
      return true;
    }
  }
  // Recovery failed; stay silent unless the caller demands a diagnostic.
  if (!ForceComplain) return false;

  bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
  Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
  if (!IsMV)
    notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
  E = ExprError();
  return true;
}
2865 
getSuperIdentifier() const2866 IdentifierInfo *Sema::getSuperIdentifier() const {
2867   if (!Ident_super)
2868     Ident_super = &Context.Idents.get("super");
2869   return Ident_super;
2870 }
2871 
PushCapturedRegionScope(Scope * S,CapturedDecl * CD,RecordDecl * RD,CapturedRegionKind K,unsigned OpenMPCaptureLevel)2872 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
2873                                    CapturedRegionKind K,
2874                                    unsigned OpenMPCaptureLevel) {
2875   auto *CSI = new CapturedRegionScopeInfo(
2876       getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
2877       (getLangOpts().OpenMP && K == CR_OpenMP)
2878           ? OpenMP().getOpenMPNestingLevel()
2879           : 0,
2880       OpenMPCaptureLevel);
2881   CSI->ReturnType = Context.VoidTy;
2882   FunctionScopes.push_back(CSI);
2883   CapturingFunctionScopes++;
2884 }
2885 
getCurCapturedRegion()2886 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2887   if (FunctionScopes.empty())
2888     return nullptr;
2889 
2890   return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
2891 }
2892 
/// Read-only access to the recorded map from fields to delete-expression
/// locations (DeleteExprs).
const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
  return DeleteExprs;
}
2897 
// Snapshot the current floating-point state: Sema's FP features, the FP
// pragma stack's current value, and the preprocessor's FP evaluation method
// and last FP pragma location.  The destructor restores all of these.
Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
    : S(S), OldFPFeaturesState(S.CurFPFeatures),
      OldOverrides(S.FpPragmaStack.CurrentValue),
      OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
      OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2903 
// Restore the floating-point state captured at construction.
Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
  S.CurFPFeatures = OldFPFeaturesState;
  S.FpPragmaStack.CurrentValue = OldOverrides;
  // Restore the preprocessor's FP eval method together with the pragma
  // location it was set at.
  S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
}
2909 
isDeclaratorFunctionLike(Declarator & D)2910 bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2911   assert(D.getCXXScopeSpec().isSet() &&
2912          "can only be called for qualified names");
2913 
2914   auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2915                          LookupOrdinaryName, forRedeclarationInCurContext());
2916   DeclContext *DC = computeDeclContext(D.getCXXScopeSpec(),
2917                                        !D.getDeclSpec().isFriendSpecified());
2918   if (!DC)
2919     return false;
2920 
2921   LookupQualifiedName(LR, DC);
2922   bool Result = llvm::all_of(LR, [](Decl *Dcl) {
2923     if (NamedDecl *ND = dyn_cast<NamedDecl>(Dcl)) {
2924       ND = ND->getUnderlyingDecl();
2925       return isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
2926              isa<UsingDecl>(ND);
2927     }
2928     return false;
2929   });
2930   return Result;
2931 }
2932 
CreateAnnotationAttr(const AttributeCommonInfo & CI,StringRef Annot,MutableArrayRef<Expr * > Args)2933 Attr *Sema::CreateAnnotationAttr(const AttributeCommonInfo &CI, StringRef Annot,
2934                                  MutableArrayRef<Expr *> Args) {
2935 
2936   auto *A = AnnotateAttr::Create(Context, Annot, Args.data(), Args.size(), CI);
2937   if (!ConstantFoldAttrArgs(
2938           CI, MutableArrayRef<Expr *>(A->args_begin(), A->args_end()))) {
2939     return nullptr;
2940   }
2941   return A;
2942 }
2943 
CreateAnnotationAttr(const ParsedAttr & AL)2944 Attr *Sema::CreateAnnotationAttr(const ParsedAttr &AL) {
2945   // Make sure that there is a string literal as the annotation's first
2946   // argument.
2947   StringRef Str;
2948   if (!checkStringLiteralArgumentAttr(AL, 0, Str))
2949     return nullptr;
2950 
2951   llvm::SmallVector<Expr *, 4> Args;
2952   Args.reserve(AL.getNumArgs() - 1);
2953   for (unsigned Idx = 1; Idx < AL.getNumArgs(); Idx++) {
2954     assert(!AL.isArgIdent(Idx));
2955     Args.push_back(AL.getArgAsExpr(Idx));
2956   }
2957 
2958   return CreateAnnotationAttr(AL, Str, Args);
2959 }
2960