//===- llvm/CodeGen/GlobalISel/CallLowering.h - Call lowering ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM calls to machine code calls.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
#define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdint>
#include <functional>

namespace llvm {

class AttributeList;
class CallBase;
class DataLayout;
class Function;
class FunctionLoweringInfo;
class MachineIRBuilder;
class MachineFunction;
struct MachinePointerInfo;
class MachineRegisterInfo;
class TargetLowering;

class CallLowering {
  const TargetLowering *TLI;

  virtual void anchor();
public:
  struct BaseArgInfo {
    Type *Ty;
    SmallVector<ISD::ArgFlagsTy, 4> Flags;
    bool IsFixed;

    BaseArgInfo(Type *Ty,
                ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
                bool IsFixed = true)
        : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}

    BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
  };

  struct ArgInfo : public BaseArgInfo {
    SmallVector<Register, 4> Regs;
    // If the argument had to be split into multiple parts according to the
    // target calling convention, then this contains the original vregs
    // if the argument was an incoming arg.
    SmallVector<Register, 2> OrigRegs;

    /// Optionally track the original IR value for the argument. This may not
    /// be meaningful in all contexts. It should only be used to forward
    /// aliasing information into the MachinePointerInfo for memory arguments.
    const Value *OrigValue = nullptr;

    /// Index of the original Function's argument.
    unsigned OrigArgIndex;

    /// Sentinel value for implicit machine-level input arguments.
    static const unsigned NoArgIndex = UINT_MAX;

    ArgInfo(ArrayRef<Register> Regs, Type *Ty, unsigned OrigIndex,
            ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
            bool IsFixed = true, const Value *OrigValue = nullptr)
        : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()),
          OrigValue(OrigValue), OrigArgIndex(OrigIndex) {
      if (!Regs.empty() && Flags.empty())
        this->Flags.push_back(ISD::ArgFlagsTy());
      // FIXME: We should have just one way of saying "no register".
      assert(((Ty->isVoidTy() || Ty->isEmptyTy()) ==
              (Regs.empty() || Regs[0] == 0)) &&
             "only void types should have no register");
    }

    ArgInfo(ArrayRef<Register> Regs, const Value &OrigValue, unsigned OrigIndex,
            ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
            bool IsFixed = true)
      : ArgInfo(Regs, OrigValue.getType(), OrigIndex, Flags, IsFixed, &OrigValue) {}

    ArgInfo() = default;
  };
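
  // Illustrative only: an IR-level argument is typically described by pointing
  // an ArgInfo at the vregs that hold its (possibly split) value, its IR type,
  // and its position in the argument list. The names ArgVRegs, ArgVal and
  // ArgIdx below are hypothetical; flags and the original Value can be
  // supplied through the trailing parameters.
  //
  //   ArgInfo OrigArg(ArgVRegs, ArgVal->getType(), ArgIdx);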

  struct PtrAuthInfo {
    uint64_t Key;
    Register Discriminator;
  };

  struct CallLoweringInfo {
    /// Calling convention to be used for the call.
    CallingConv::ID CallConv = CallingConv::C;

    /// Destination of the call. It should be either a register, globaladdress,
    /// or externalsymbol.
    MachineOperand Callee = MachineOperand::CreateImm(0);

    /// Descriptor for the return type of the function.
    ArgInfo OrigRet;

    /// List of descriptors of the arguments passed to the function.
    SmallVector<ArgInfo, 32> OrigArgs;

    /// Valid if the call has a swifterror inout parameter, and contains the
    /// vreg that the swifterror should be copied into after the call.
    Register SwiftErrorVReg;

    /// Valid if the call is a controlled convergent operation.
    Register ConvergenceCtrlToken;

    /// Original IR callsite corresponding to this call, if available.
    const CallBase *CB = nullptr;

    MDNode *KnownCallees = nullptr;

    /// The auth-call information in the "ptrauth" bundle, if present.
    std::optional<PtrAuthInfo> PAI;

    /// True if the call must be tail call optimized.
    bool IsMustTailCall = false;

    /// True if the call passes all target-independent checks for tail call
    /// optimization.
    bool IsTailCall = false;

    /// True if the call was lowered as a tail call. This is consumed by the
    /// legalizer. This allows the legalizer to lower libcalls as tail calls.
    bool LoweredTailCall = false;

    /// True if the call is to a vararg function.
    bool IsVarArg = false;

    /// True if the function's return value can be lowered to registers.
    bool CanLowerReturn = true;

    /// VReg to hold the hidden sret parameter.
    Register DemoteRegister;

    /// The stack index for sret demotion.
    int DemoteStackIndex;

    /// Expected type identifier for indirect calls with a CFI check.
    const ConstantInt *CFIType = nullptr;

    /// True if this call results in convergent operations.
    bool IsConvergent = true;
  };
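
  // The Callee operand above can take several forms; rough sketches follow,
  // where GV, VReg and the symbol name are hypothetical:
  //
  //   Info.Callee = MachineOperand::CreateGA(GV, 0);                 // direct
  //   Info.Callee = MachineOperand::CreateReg(VReg, /*isDef=*/false); // indirect
  //   Info.Callee = MachineOperand::CreateES("__some_libcall");       // libcall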

  /// Argument handling is mostly uniform between the four places that
  /// make these decisions: function formal arguments, call
  /// instruction args, call instruction returns and function
  /// returns. However, once a decision has been made on where an
  /// argument should go, exactly what happens can vary slightly. This
  /// class abstracts the differences.
  ///
  /// ValueAssigner should not depend on any specific function state, and
  /// only determine the types and locations for arguments.
  struct ValueAssigner {
    ValueAssigner(bool IsIncoming, CCAssignFn *AssignFn_,
                  CCAssignFn *AssignFnVarArg_ = nullptr)
        : AssignFn(AssignFn_), AssignFnVarArg(AssignFnVarArg_),
          IsIncomingArgumentHandler(IsIncoming) {

      // Some targets change the assignment function depending on whether the
      // call is varargs or not. If no separate vararg assignment function was
      // provided, fall back to the common one.
      if (!AssignFnVarArg)
        AssignFnVarArg = AssignFn;
    }

    virtual ~ValueAssigner() = default;

    /// Returns true if the handler is dealing with incoming arguments,
    /// i.e. those that move values from some physical location to vregs.
    bool isIncomingArgumentHandler() const {
      return IsIncomingArgumentHandler;
    }

    /// Wrap a call to the (typically tablegen-generated) CCAssignFn. This may
    /// be overridden to track additional state information as arguments are
    /// assigned, or to apply target specific hacks around the legacy
    /// infrastructure.
    virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
                           ISD::ArgFlagsTy Flags, CCState &State) {
      if (getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
                                        State))
        return true;
      StackSize = State.getStackSize();
      return false;
    }

    /// Assignment function to use for a general call.
    CCAssignFn *AssignFn;

    /// Assignment function to use for a variadic call. This is the same as
    /// AssignFn on most targets.
    CCAssignFn *AssignFnVarArg;

    /// The size of the currently allocated portion of the stack.
    uint64_t StackSize = 0;

    /// Select the appropriate assignment function depending on whether this is
    /// a variadic call.
    CCAssignFn *getAssignFn(bool IsVarArg) const {
      return IsVarArg ? AssignFnVarArg : AssignFn;
    }

  private:
    const bool IsIncomingArgumentHandler;
    virtual void anchor();
  };

  struct IncomingValueAssigner : public ValueAssigner {
    IncomingValueAssigner(CCAssignFn *AssignFn_,
                          CCAssignFn *AssignFnVarArg_ = nullptr)
        : ValueAssigner(true, AssignFn_, AssignFnVarArg_) {}
  };

  struct OutgoingValueAssigner : public ValueAssigner {
    OutgoingValueAssigner(CCAssignFn *AssignFn_,
                          CCAssignFn *AssignFnVarArg_ = nullptr)
        : ValueAssigner(false, AssignFn_, AssignFnVarArg_) {}
  };
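
  // A target usually pairs one of the assigners above with a tablegen-generated
  // CCAssignFn, and may override assignArg to track extra state while arguments
  // are assigned. Illustrative sketch only; MyIncomingAssigner and CC_MyTarget
  // are hypothetical names:
  //
  //   struct MyIncomingAssigner : IncomingValueAssigner {
  //     MyIncomingAssigner() : IncomingValueAssigner(CC_MyTarget) {}
  //     bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
  //                    CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
  //                    ISD::ArgFlagsTy Flags, CCState &State) override {
  //       // Record or adjust per-argument state here before delegating.
  //       return ValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT, LocInfo,
  //                                       Info, Flags, State);
  //     }
  //   };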

  struct ValueHandler {
    MachineIRBuilder &MIRBuilder;
    MachineRegisterInfo &MRI;
    const bool IsIncomingArgumentHandler;

    ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
                 MachineRegisterInfo &MRI)
        : MIRBuilder(MIRBuilder), MRI(MRI),
          IsIncomingArgumentHandler(IsIncoming) {}

    virtual ~ValueHandler() = default;

    /// Returns true if the handler is dealing with incoming arguments,
    /// i.e. those that move values from some physical location to vregs.
    bool isIncomingArgumentHandler() const {
      return IsIncomingArgumentHandler;
    }

    /// Materialize a VReg containing the address of the specified
    /// stack-based object. This is either based on a FrameIndex or
    /// direct SP manipulation, depending on the context. \p MPO
    /// should be initialized to an appropriate description of the
    /// address created.
    virtual Register getStackAddress(uint64_t MemSize, int64_t Offset,
                                     MachinePointerInfo &MPO,
                                     ISD::ArgFlagsTy Flags) = 0;

    /// Return the in-memory size to write for the argument at \p VA. This may
    /// be smaller than the allocated stack slot size.
    ///
    /// This is overridable primarily for targets to maintain compatibility
    /// with hacks around the existing DAG call lowering infrastructure.
    virtual LLT getStackValueStoreType(const DataLayout &DL,
                                       const CCValAssign &VA,
                                       ISD::ArgFlagsTy Flags) const;

    /// The specified value has been assigned to a physical register,
    /// handle the appropriate COPY (either to or from) and mark any
    /// relevant uses/defines as needed.
    virtual void assignValueToReg(Register ValVReg, Register PhysReg,
                                  const CCValAssign &VA) = 0;

    /// The specified value has been assigned to a stack
    /// location. Load or store it there, with appropriate extension
    /// if necessary.
    virtual void assignValueToAddress(Register ValVReg, Register Addr,
                                      LLT MemTy, const MachinePointerInfo &MPO,
                                      const CCValAssign &VA) = 0;

    /// An overload which takes an ArgInfo if additional information about the
    /// arg is needed. \p ValRegIndex is the index in \p Arg.Regs for the value
    /// to store.
    virtual void assignValueToAddress(const ArgInfo &Arg, unsigned ValRegIndex,
                                      Register Addr, LLT MemTy,
                                      const MachinePointerInfo &MPO,
                                      const CCValAssign &VA) {
      assignValueToAddress(Arg.Regs[ValRegIndex], Addr, MemTy, MPO, VA);
    }

    /// Handle custom values, which may be passed into one or more of \p VAs.
    /// If the handler wants the assignments to be delayed until after the
    /// memory location assignments, then it sets \p Thunk to the thunk that
    /// will perform the assignment.
    /// \return The number of \p VAs that have been assigned including the
    ///         first one, and which should therefore be skipped from further
    ///         processing.
    virtual unsigned assignCustomValue(ArgInfo &Arg, ArrayRef<CCValAssign> VAs,
                                       std::function<void()> *Thunk = nullptr) {
      // This is not a pure virtual method because not all targets need to
      // worry about custom values.
      llvm_unreachable("Custom values not supported");
    }

    /// Do a memory copy of \p MemSize bytes from \p SrcPtr to \p DstPtr. This
    /// is necessary for outgoing stack-passed byval arguments.
    void
    copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
                       const MachinePointerInfo &DstPtrInfo, Align DstAlign,
                       const MachinePointerInfo &SrcPtrInfo, Align SrcAlign,
                       uint64_t MemSize, CCValAssign &VA) const;

    /// Extend a register to the location type given in \p VA, capped at
    /// extending to at most \p MaxSizeBits bits. If \p MaxSizeBits is 0 then
    /// no maximum is set.
    Register extendRegister(Register ValReg, const CCValAssign &VA,
                            unsigned MaxSizeBits = 0);
  };

  /// Base class for ValueHandlers used for arguments coming into the current
  /// function, or for return values received from a call.
  struct IncomingValueHandler : public ValueHandler {
    IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
        : ValueHandler(/*IsIncoming*/ true, MIRBuilder, MRI) {}

    /// Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or another hint instruction based on
    /// \p VA, returning the new register if a hint was inserted.
    Register buildExtensionHint(const CCValAssign &VA, Register SrcReg,
                                LLT NarrowTy);

    /// Provides a default implementation for argument handling.
    void assignValueToReg(Register ValVReg, Register PhysReg,
                          const CCValAssign &VA) override;
  };

  /// Base class for ValueHandlers used for arguments passed to a function call,
  /// or for return values.
  struct OutgoingValueHandler : public ValueHandler {
    OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
        : ValueHandler(/*IsIncoming*/ false, MIRBuilder, MRI) {}
  };

protected:
  /// Getter for generic TargetLowering class.
  const TargetLowering *getTLI() const {
    return TLI;
  }

  /// Getter for target specific TargetLowering class.
  template <class XXXTargetLowering>
  const XXXTargetLowering *getTLI() const {
    return static_cast<const XXXTargetLowering *>(TLI);
  }
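
  // For example, a target implementation would typically fetch its own
  // lowering object through this helper (MyTargetLowering is a hypothetical
  // class name):
  //
  //   const auto &TLI = *getTLI<MyTargetLowering>();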

  /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
  /// parameter of \p Call.
  ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
                                         unsigned ArgIdx) const;

  /// \returns Flags corresponding to the attributes on the return from \p Call.
  ISD::ArgFlagsTy getAttributesForReturn(const CallBase &Call) const;

  /// Adds flags to \p Flags based off of the attributes in \p Attrs.
  /// \p OpIdx is the index in \p Attrs to add flags from.
  void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
                                 const AttributeList &Attrs,
                                 unsigned OpIdx) const;

  template <typename FuncInfoTy>
  void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
                   const FuncInfoTy &FuncInfo) const;

  /// Break \p OrigArgInfo into one or more pieces the calling convention can
  /// process, returned in \p SplitArgs. For example, this should break structs
  /// down into individual fields.
  ///
  /// If \p Offsets is non-null, it points to a vector to be filled in
  /// with the in-memory offsets of each of the individual values.
  void splitToValueTypes(const ArgInfo &OrigArgInfo,
                         SmallVectorImpl<ArgInfo> &SplitArgs,
                         const DataLayout &DL, CallingConv::ID CallConv,
                         SmallVectorImpl<uint64_t> *Offsets = nullptr) const;
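
  // For example, assuming a typical 64-bit DataLayout, an argument of IR type
  // {i64, i32} would be split into two ArgInfos (one i64 and one i32), and
  // *Offsets, if provided, would receive their in-memory offsets {0, 8}.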

  /// Analyze the argument list in \p Args, using \p Assigner to populate \p
  /// CCInfo. This will determine the types and locations to use for passed or
  /// returned values. This may resize fields in \p Args if the value is split
  /// across multiple registers or stack slots.
  ///
  /// This is independent of the function state and can be used
  /// to determine how a call would pass arguments without needing to change the
  /// function. This can be used to check if arguments are suitable for tail
  /// call lowering.
  ///
  /// \return True if everything has succeeded, false otherwise.
  bool determineAssignments(ValueAssigner &Assigner,
                            SmallVectorImpl<ArgInfo> &Args,
                            CCState &CCInfo) const;

  /// Invoke ValueAssigner::assignArg on each of the given \p Args and then use
  /// \p Handler to move them to the assigned locations.
  ///
  /// \return True if everything has succeeded, false otherwise.
  bool determineAndHandleAssignments(
      ValueHandler &Handler, ValueAssigner &Assigner,
      SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
      CallingConv::ID CallConv, bool IsVarArg,
      ArrayRef<Register> ThisReturnRegs = std::nullopt) const;
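
  // Typical use from a target's lowerFormalArguments, as a rough sketch only;
  // CC_MyTarget and MyIncomingValueHandler are hypothetical target-provided
  // pieces:
  //
  //   SmallVector<ArgInfo, 8> SplitArgs;
  //   unsigned Idx = 0;
  //   for (const auto &Arg : F.args()) {
  //     ArgInfo OrigArg(VRegs[Idx], Arg.getType(), Idx);
  //     setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
  //     splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
  //     ++Idx;
  //   }
  //   IncomingValueAssigner Assigner(CC_MyTarget);
  //   MyIncomingValueHandler Handler(MIRBuilder, MRI);
  //   return determineAndHandleAssignments(Handler, Assigner, SplitArgs,
  //                                        MIRBuilder, F.getCallingConv(),
  //                                        F.isVarArg());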

  /// Use \p Handler to insert code to handle the argument/return values
  /// represented by \p Args. It is expected that determineAssignments has
  /// previously processed these arguments to populate \p CCState and
  /// \p ArgLocs.
  bool
  handleAssignments(ValueHandler &Handler, SmallVectorImpl<ArgInfo> &Args,
                    CCState &CCState, SmallVectorImpl<CCValAssign> &ArgLocs,
                    MachineIRBuilder &MIRBuilder,
                    ArrayRef<Register> ThisReturnRegs = std::nullopt) const;

  /// Check whether parameters to a call that are passed in callee saved
  /// registers are the same as from the calling function. This needs to be
  /// checked for tail call eligibility.
  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
                            const uint32_t *CallerPreservedMask,
                            const SmallVectorImpl<CCValAssign> &ArgLocs,
                            const SmallVectorImpl<ArgInfo> &OutVals) const;

  /// \returns True if the calling conventions of the callee and the caller
  /// pass results in the same way. Typically used for tail call eligibility
  /// checks.
  ///
  /// \p Info is the CallLoweringInfo for the call.
  /// \p MF is the MachineFunction for the caller.
  /// \p InArgs contains the results of the call.
  /// \p CalleeAssigner specifies the target's handling of the argument types
  /// for the callee.
  /// \p CallerAssigner specifies the target's handling of the
  /// argument types for the caller.
  bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF,
                         SmallVectorImpl<ArgInfo> &InArgs,
                         ValueAssigner &CalleeAssigner,
                         ValueAssigner &CallerAssigner) const;

public:
  CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
  virtual ~CallLowering() = default;

  /// \return true if the target is capable of handling swifterror values that
  /// have been promoted to a specified register. The extended versions of
  /// lowerReturn and lowerCall should be implemented.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Load the returned value from the stack into virtual registers in \p VRegs.
  /// It uses the frame index \p FI and the start offset from \p DemoteReg.
  /// The loaded data size will be determined from \p RetTy.
  void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
                       ArrayRef<Register> VRegs, Register DemoteReg,
                       int FI) const;

  /// Store the return value given by \p VRegs into the stack, starting at the
  /// offset specified in \p DemoteReg.
  void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
                        ArrayRef<Register> VRegs, Register DemoteReg) const;

  /// Insert the hidden sret ArgInfo at the beginning of \p SplitArgs.
  /// This function should be called from the target specific
  /// lowerFormalArguments when \p F requires the sret demotion.
  void insertSRetIncomingArgument(const Function &F,
                                  SmallVectorImpl<ArgInfo> &SplitArgs,
                                  Register &DemoteReg, MachineRegisterInfo &MRI,
                                  const DataLayout &DL) const;

  /// For the call-base described by \p CB, insert the hidden sret ArgInfo into
  /// the OrigArgs field of \p Info.
  void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
                                  const CallBase &CB,
                                  CallLoweringInfo &Info) const;

  /// \return True if the return type described by \p Outs can be returned
  /// without performing sret demotion.
  bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
                   CCAssignFn *Fn) const;

  /// Get the type and the ArgFlags for the split components of \p RetTy as
  /// returned by \c ComputeValueVTs.
  void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
                     SmallVectorImpl<BaseArgInfo> &Outs,
                     const DataLayout &DL) const;

  /// Top-level function to check the return type based on the target calling
  /// convention. \return True if the return value of \p MF can be returned
  /// without performing sret demotion.
  bool checkReturnTypeForCallConv(MachineFunction &MF) const;

  /// This hook must be implemented to check whether the return values
  /// described by \p Outs can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
                              SmallVectorImpl<BaseArgInfo> &Outs,
                              bool IsVarArg) const {
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by \p Val, into the specified virtual registers \p VRegs.
  /// This hook is used by GlobalISel.
  ///
  /// \p FLI is required for sret demotion.
  ///
  /// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
  /// that needs to be implicitly returned.
  ///
  /// \return True if the lowering succeeds, false otherwise.
  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
                           ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
                           Register SwiftErrorVReg) const {
    if (!supportSwiftError()) {
      assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
      return lowerReturn(MIRBuilder, Val, VRegs, FLI);
    }
    return false;
  }

  /// This hook behaves as the extended lowerReturn function, but for targets
  /// that do not support swifterror value promotion.
  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
                           ArrayRef<Register> VRegs,
                           FunctionLoweringInfo &FLI) const {
    return false;
  }

  virtual bool fallBackToDAGISel(const MachineFunction &MF) const {
    return false;
  }

  /// This hook must be implemented to lower the incoming (formal)
  /// arguments, described by \p VRegs, for GlobalISel. Each argument
  /// must end up in the related virtual registers described by \p VRegs.
  /// In other words, the first argument should end up in \c VRegs[0],
  /// the second in \c VRegs[1], and so on. For each argument, there will be one
  /// register for each non-aggregate type, as returned by \c computeValueLLTs.
  /// \p MIRBuilder is set to the proper insertion point for the argument
  /// lowering. \p FLI is required for sret demotion.
  ///
  /// \return True if the lowering succeeded, false otherwise.
  virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                    const Function &F,
                                    ArrayRef<ArrayRef<Register>> VRegs,
                                    FunctionLoweringInfo &FLI) const {
    return false;
  }

  /// This hook must be implemented to lower the given call instruction,
  /// including argument and return value marshalling.
  ///
  /// \return true if the lowering succeeded, false otherwise.
  virtual bool lowerCall(MachineIRBuilder &MIRBuilder,
                         CallLoweringInfo &Info) const {
    return false;
  }
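
  // Shape of a typical target override, as a rough sketch only; CC_MyTarget and
  // MyOutgoingValueHandler are hypothetical, and a real implementation also
  // emits the call instruction itself and handles the return value:
  //
  //   bool MyCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
  //                                  CallLoweringInfo &Info) const {
  //     const DataLayout &DL = MIRBuilder.getDataLayout();
  //     MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  //     SmallVector<ArgInfo, 8> OutArgs;
  //     for (const ArgInfo &OrigArg : Info.OrigArgs)
  //       splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
  //     OutgoingValueAssigner Assigner(CC_MyTarget);
  //     MyOutgoingValueHandler Handler(MIRBuilder, MRI);
  //     return determineAndHandleAssignments(Handler, Assigner, OutArgs,
  //                                          MIRBuilder, Info.CallConv,
  //                                          Info.IsVarArg);
  //   }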

  /// Lower the given call instruction, including argument and return value
  /// marshalling.
  ///
  /// \p Call is the call/invoke instruction.
  ///
  /// \p ResRegs are the registers where the call's return value should be
  /// stored (or 0 if there is no return value). There will be one register for
  /// each non-aggregate type, as returned by \c computeValueLLTs.
  ///
  /// \p ArgRegs is a list of lists of virtual registers containing each
  /// argument that needs to be passed (argument \c i should be placed in \c
  /// ArgRegs[i]). For each argument, there will be one register for each
  /// non-aggregate type, as returned by \c computeValueLLTs.
  ///
  /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
  /// parameter, and contains the vreg that the swifterror should be copied into
  /// after the call.
  ///
  /// \p GetCalleeReg is a callback to materialize a register for the callee if
  /// the target determines it cannot jump to the destination based purely on \p
  /// Call. This might be because \p Call is indirect, or because of the limited
  /// range of an immediate jump.
  ///
  /// \return true if the lowering succeeded, false otherwise.
  bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
                 ArrayRef<Register> ResRegs,
                 ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
                 std::optional<PtrAuthInfo> PAI, Register ConvergenceCtrlToken,
                 std::function<unsigned()> GetCalleeReg) const;

  /// Targets which want to use big-endian can enable it with the
  /// enableBigEndian() hook.
  virtual bool enableBigEndian() const { return false; }

  /// For targets which support the "returned" parameter attribute, returns
  /// true if the given type is a valid one to use with "returned".
  virtual bool isTypeIsValidForThisReturn(EVT Ty) const { return false; }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H