//===----- SemaCUDA.h ----- Semantic Analysis for CUDA constructs ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares semantic analysis for CUDA constructs.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_SEMA_SEMACUDA_H
#define LLVM_CLANG_SEMA_SEMACUDA_H

#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Redeclarable.h"
#include "clang/Basic/Cuda.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaBase.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <string>

namespace clang {

enum class CUDAFunctionTarget;

class SemaCUDA : public SemaBase {
public:
  SemaCUDA(Sema &S);

  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__.  So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__.  Returns false if the count is 0
  /// before decrementing, so you can emit an error.
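  ///
  /// Illustrative pairing (a sketch only; the real call sites are the pragma
  /// handlers, and the diagnostic emitted on failure is left abstract here):
  ///
  ///   // #pragma clang force_cuda_host_device begin
  ///   PushForceHostDevice();
  ///   ...
  ///   // #pragma clang force_cuda_host_device end
  ///   if (!PopForceHostDevice())
  ///     /* report an unmatched "end" pragma */;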
  bool PopForceHostDevice();

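  /// Builds the CUDA execution-configuration expression for a kernel launch.
  /// A sketch of the assumed correspondence (not spelled out in this header):
  /// \p LLLLoc and \p GGGLoc are taken to be the locations of the "<<<" and
  /// ">>>" tokens, and \p ExecConfig carries the launch arguments:
  ///
  ///   kernel<<<Grid, Block, SharedMemBytes, Stream>>>(args);
  ///   //    ^-- LLLLoc                          ^-- GGGLoc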
  ExprResult ActOnExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                 MultiExprArg ExecConfig,
                                 SourceLocation GGGLoc);

  /// A pair of a canonical FunctionDecl and a SourceLocation.  When used as the
  /// key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<const FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCall has emitted a
  /// (maybe deferred) "bad call" diagnostic.  We use this to avoid emitting the
  /// same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<const FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics
  ///   unless \p EmitOnBothSides is true.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
  /// - If CurContext is a __host__ __device__ function and we are compiling for
  ///   the device, creates a diagnostic which is emitted if and when we realize
  ///   that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (DiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentTarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  SemaDiagnosticBuilder DiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// Same as DiagIfDeviceCode, with "host" and "device" switched.
  SemaDiagnosticBuilder DiagIfHostCode(SourceLocation Loc, unsigned DiagID);

  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong.  Returns CUDAFunctionTarget::Host if D is null.
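  ///
  /// Illustrative results (a sketch, not an exhaustive list):
  ///
  ///   __global__ void K();            // CUDAFunctionTarget::Global
  ///   __device__ int D();             // CUDAFunctionTarget::Device
  ///   __host__ __device__ int HD();   // CUDAFunctionTarget::HostDevice
  ///   int H();                        // CUDAFunctionTarget::Host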
  CUDAFunctionTarget IdentifyTarget(const FunctionDecl *D,
                                    bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyTarget(const ParsedAttributesView &Attrs);

  enum CUDAVariableTarget {
    CVT_Device,  /// Emitted on device side with a shadow variable on host side
    CVT_Host,    /// Emitted on host side only
    CVT_Both,    /// Emitted on both sides with different addresses
    CVT_Unified, /// Emitted as a unified address, e.g. managed variables
  };
  /// Determines whether the given variable is emitted on host or device side.
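  ///
  /// Illustrative mapping (assumed, not exhaustive):
  ///
  ///   __device__ int D;     // CVT_Device
  ///   __constant__ int C;   // CVT_Device
  ///   __managed__ int M;    // CVT_Unified
  ///   int H;                // CVT_Host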
  CUDAVariableTarget IdentifyTarget(const VarDecl *D);

  /// Defines kinds of CUDA global host/device context where a function may be
  /// called.
  enum CUDATargetContextKind {
    CTCK_Unknown,       /// Unknown context
    CTCK_InitGlobalVar, /// Function called during global variable
                        /// initialization
  };

  /// Defines the current global CUDA host/device context where a function may
  /// be called. Only used when a function is called outside of any functions.
  struct CUDATargetContext {
    CUDAFunctionTarget Target = CUDAFunctionTarget::HostDevice;
    CUDATargetContextKind Kind = CTCK_Unknown;
    Decl *D = nullptr;
  } CurCUDATargetCtx;

  struct CUDATargetContextRAII {
    SemaCUDA &S;
    SemaCUDA::CUDATargetContext SavedCtx;
    CUDATargetContextRAII(SemaCUDA &S_, SemaCUDA::CUDATargetContextKind K,
                          Decl *D);
    ~CUDATargetContextRAII() { S.CurCUDATargetCtx = SavedCtx; }
  };

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentTarget() {
    return IdentifyTarget(dyn_cast<FunctionDecl>(SemaRef.CurContext));
  }

  static bool isImplicitHostDeviceFunction(const FunctionDecl *D);

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
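  ///
  /// Illustrative results when compiling for the device (a sketch):
  ///
  ///   __device__ Caller          -> __device__ Callee : CFP_Native
  ///   __host__ __device__ Caller -> __device__ Callee : CFP_SameSide
  ///   __host__ __device__ Caller -> __host__ Callee   : CFP_WrongSide
  ///   __device__ Caller          -> __host__ Callee   : CFP_Never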
  CUDAFunctionPreference IdentifyPreference(const FunctionDecl *Caller,
                                            const FunctionDecl *Callee);

  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes.  Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls.  These may appear in
  /// semantically correct CUDA programs, but only if they're never codegen'ed.
  bool IsAllowedCall(const FunctionDecl *Caller, const FunctionDecl *Callee) {
    return IdentifyPreference(Caller, Callee) != CFP_Never;
  }

  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous);

  /// May add implicit CUDAConstantAttr attribute to VD, depending on VD
  /// and current compilation settings.
  void MaybeAddConstantAttr(VarDecl *VD);

  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
  ///   be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating duplicate
  ///   deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
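  ///
  /// Example usage (an illustrative sketch of a hypothetical call-checking
  /// site; the exact caller-side code is not prescribed by this header):
  ///
  ///   if (!CheckCall(Call->getBeginLoc(), Callee))
  ///     return ExprError();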
  bool CheckCall(SourceLocation Loc, FunctionDecl *Callee);

  void CheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);

  /// Set __device__ or __host__ __device__ attributes on the given lambda
  /// operator() method.
  ///
  /// CUDA lambdas are host device functions by default unless they carry an
  /// explicit host or device attribute.
  void SetLambdaAttrs(CXXMethodDecl *Method);

  /// Record \p FD if it is a CUDA/HIP implicit host device function used on
  /// device side in device compilation.
  void RecordImplicitHostDeviceFuncUsedByDevice(const FunctionDecl *FD);

  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedMatches(
      const FunctionDecl *Caller,
      llvm::SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>>
          &Matches);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object on
  ///        its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
  /// \return true if there was an error inferring.
  /// The result of this call is implicit CUDA target attribute(s) attached to
  /// the member declaration.
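  ///
  /// Illustrative example (assumed behavior, shown as a sketch):
  ///
  ///   struct Base { __device__ Base(); };
  ///   struct Derived : Base {};
  ///   // The implicit Derived::Derived() must call a __device__ constructor,
  ///   // so it is inferred to be a __device__ function.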
  bool inferTargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                           CXXSpecialMemberKind CSM,
                                           CXXMethodDecl *MemberDecl,
                                           bool ConstRHS, bool Diagnose);

  /// \return true if \p CD can be considered empty according to CUDA
  /// (E.2.3.1 in CUDA 7.5 Programming guide).
  bool isEmptyConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
  bool isEmptyDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

  // \brief Checks that initializers of \p VD satisfy CUDA restrictions. In
  // case of error emits appropriate diagnostic and invalidates \p VD.
  //
  // \details CUDA allows only empty constructors as initializers for global
  // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
  // __shared__ variables whether they are local or not (they all are implicitly
  // static in CUDA). One exception is that CUDA allows constant initializers
  // for __constant__ and __device__ variables.
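  //
  // Illustrative examples (a sketch of what is assumed to be accepted or
  // rejected; type names are placeholders, and the authoritative rules are in
  // the CUDA Programming Guide):
  //
  //   __constant__ int C = 42;    // OK: constant initializer
  //   __device__ NonEmptyCtor D;  // rejected: requires dynamic initialization
  //   __shared__ int S = 0;       // rejected: __shared__ cannot be initialized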
  void checkAllowedInitializer(VarDecl *VD);

  /// Check whether NewFD is a valid overload for CUDA. Emits
  /// diagnostics and invalidates NewFD if not.
  void checkTargetOverload(FunctionDecl *NewFD, const LookupResult &Previous);
  /// Copies target attributes from the template TD to the function FD.
  void inheritTargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);

  /// Returns the name of the launch configuration function.  This is the name
  /// of the function that will be called to configure the kernel call, with
  /// the parameters specified via <<<>>>.
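  ///
  /// Illustrative lowering (a sketch; the actual configuration function name
  /// depends on the CUDA/HIP runtime being targeted):
  ///
  ///   kernel<<<Grid, Block, SharedMemBytes, Stream>>>(args);
  ///   // is roughly treated as:
  ///   //   <configure-func>(Grid, Block, SharedMemBytes, Stream);
  ///   //   kernel(args);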
  std::string getConfigureFuncName() const;

private:
  unsigned ForceHostDeviceDepth = 0;

  friend class ASTReader;
  friend class ASTWriter;
};

} // namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::SemaCUDA::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::SemaCUDA::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<const clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getHashValue());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif // LLVM_CLANG_SEMA_SEMACUDA_H