//== llvm/CodeGen/GlobalISel/LegalizerHelper.h ---------------- -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file A pass to convert the target-illegal operations created by IR -> MIR
/// translation into ones the target expects to be able to select. This may
/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
/// G_ADD <4 x i16>.
///
/// The LegalizerHelper class is where most of the work happens, and is
/// designed to be callable from other passes that find themselves with an
/// illegal instruction.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZERHELPER_H
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZERHELPER_H

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetOpcodes.h"

namespace llvm {
// Forward declarations.
class APInt;
class GAnyLoad;
class GLoadStore;
class GStore;
class GenericMachineInstr;
class MachineFunction;
class MachineIRBuilder;
class MachineInstr;
class MachineInstrBuilder;
struct MachinePointerInfo;
template <typename T> class SmallVectorImpl;
class LegalizerInfo;
class MachineRegisterInfo;
class GISelChangeObserver;
class LostDebugLocObserver;
class TargetLowering;

class LegalizerHelper {
public:
  /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
  /// functions
  MachineIRBuilder &MIRBuilder;

  /// To keep track of changes made by the LegalizerHelper.
  GISelChangeObserver &Observer;

private:
  MachineRegisterInfo &MRI;
  const LegalizerInfo &LI;
  const TargetLowering &TLI;
  GISelKnownBits *KB;

public:
  enum LegalizeResult {
    /// Instruction was already legal and no change was made to the
    /// MachineFunction.
    AlreadyLegal,

    /// Instruction has been legalized and the MachineFunction changed.
    Legalized,

    /// Some kind of error has occurred and we could not legalize this
    /// instruction.
    UnableToLegalize,
  };

  /// Expose LegalizerInfo so that clients can reuse it.
  const LegalizerInfo &getLegalizerInfo() const { return LI; }
  const TargetLowering &getTargetLowering() const { return TLI; }
  GISelKnownBits *getKnownBits() const { return KB; }

  LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
                  MachineIRBuilder &B);
  LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
                  GISelChangeObserver &Observer, MachineIRBuilder &B,
                  GISelKnownBits *KB = nullptr);

  /// Replace \p MI by a sequence of legal instructions that can implement the
  /// same operation. Note that this means \p MI may be deleted, so any
  /// iterator steps should be performed before calling this function. The
  /// helper should have been initialized with the MachineFunction containing
  /// \p MI.
  ///
  /// Considered as an opaque blob, the legal code will use and define the same
  /// registers as \p MI.
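  ///
  /// A minimal driver sketch (the surrounding variables, debug-type string,
  /// and error handling are illustrative, not part of this API):
  /// \code
  ///   LegalizerHelper Helper(MF, LI, Observer, MIRBuilder, KB);
  ///   LostDebugLocObserver LocObserver("my-legalizer");
  ///   if (Helper.legalizeInstrStep(MI, LocObserver) ==
  ///       LegalizerHelper::UnableToLegalize)
  ///     report_fatal_error("unable to legalize instruction");
  /// \endcode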
  LegalizeResult legalizeInstrStep(MachineInstr &MI,
                                   LostDebugLocObserver &LocObserver);

  /// Legalize an instruction by emitting a runtime library call instead.
  LegalizeResult libcall(MachineInstr &MI, LostDebugLocObserver &LocObserver);

  /// Legalize an instruction by reducing the width of the underlying scalar
  /// type.
  LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);

  /// Legalize an instruction by performing the operation on a wider scalar
  /// type (for example a 16-bit addition can be safely performed at 32-bit
  /// precision, ignoring the unused bits).
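  ///
  /// For a 16-bit G_ADD widened to 32 bits, the emitted sequence is roughly
  /// equivalent to this MachineIRBuilder sketch (register names are
  /// illustrative):
  /// \code
  ///   auto WideLHS = MIRBuilder.buildAnyExt(WideTy, LHS);
  ///   auto WideRHS = MIRBuilder.buildAnyExt(WideTy, RHS);
  ///   auto WideAdd = MIRBuilder.buildAdd(WideTy, WideLHS, WideRHS);
  ///   MIRBuilder.buildTrunc(Dst, WideAdd);
  /// \endcode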
  LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);

  /// Legalize an instruction by replacing the value type
  LegalizeResult bitcast(MachineInstr &MI, unsigned TypeIdx, LLT Ty);

  /// Legalize an instruction by splitting it into simpler parts, hopefully
  /// understood by the target.
  LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);

  /// Legalize a vector instruction by splitting into multiple components, each
  /// acting on the same scalar type as the original but with fewer elements.
  LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                     LLT NarrowTy);

  /// Legalize a vector instruction by increasing the number of vector elements
  /// involved and ignoring the added elements later.
  LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                    LLT MoreTy);

  /// Cast the given value to an LLT::scalar with an equivalent size. Returns
  /// the register to use if an instruction was inserted. Returns the original
  /// register if no coercion was necessary.
  //
  // This may also fail and return Register() if there is no legal way to cast.
  Register coerceToScalar(Register Val);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Use by extending the operand's type to \p WideTy using the specified
  /// \p ExtOpcode for the extension instruction, and replacing the vreg of the
  /// operand in place.
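  ///
  /// Conceptually this amounts to the following sketch (\c MO is an
  /// illustrative name for the affected operand):
  /// \code
  ///   MachineOperand &MO = MI.getOperand(OpIdx);
  ///   auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO.getReg()});
  ///   MO.setReg(ExtB.getReg(0));
  /// \endcode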
  void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx,
                      unsigned ExtOpcode);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Use by truncating the operand's type to \p NarrowTy using G_TRUNC, and
  /// replacing the vreg of the operand in place.
  void narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Def by extending the operand's type to \p WideTy and truncating it back
  /// with the \p TruncOpcode, and replacing the vreg of the operand in place.
  void widenScalarDst(MachineInstr &MI, LLT WideTy, unsigned OpIdx = 0,
                      unsigned TruncOpcode = TargetOpcode::G_TRUNC);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Def by truncating the operand's type to \p NarrowTy, replacing in place
  /// and extending back with \p ExtOpcode.
  void narrowScalarDst(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx,
                       unsigned ExtOpcode);
  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Def by performing it with additional vector elements and extracting the
  /// result elements, and replacing the vreg of the operand in place.
  void moreElementsVectorDst(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Use by producing a vector with undefined high elements, extracting the
  /// original vector type, and replacing the vreg of the operand in place.
  void moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// use by inserting a G_BITCAST to \p CastTy
  void bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// def by inserting a G_BITCAST from \p CastTy
  void bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx);

private:
  LegalizeResult
  widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
  LegalizeResult
  widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
  LegalizeResult
  widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
  LegalizeResult
  widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
  LegalizeResult widenScalarAddSubOverflow(MachineInstr &MI, unsigned TypeIdx,
                                           LLT WideTy);
  LegalizeResult widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
                                         LLT WideTy);
  LegalizeResult widenScalarMulo(MachineInstr &MI, unsigned TypeIdx,
                                 LLT WideTy);

  /// Helper function to split a wide generic register into bitwise blocks with
  /// the given Type (which implies the number of blocks needed). The generic
  /// registers created are appended to \p VRegs, starting at bit 0 of \p Reg.
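  ///
  /// For example (illustrative register names), splitting an s64 register
  /// into four s16 pieces, least significant bits first:
  /// \code
  ///   SmallVector<Register, 4> Parts;
  ///   extractParts(Src64, LLT::scalar(16), 4, Parts);
  /// \endcode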
  void extractParts(Register Reg, LLT Ty, int NumParts,
                    SmallVectorImpl<Register> &VRegs);

  /// Version which handles irregular splits.
  bool extractParts(Register Reg, LLT RegTy, LLT MainTy,
                    LLT &LeftoverTy,
                    SmallVectorImpl<Register> &VRegs,
                    SmallVectorImpl<Register> &LeftoverVRegs);

  /// Version which handles irregular sub-vector splits.
  void extractVectorParts(Register Reg, unsigned NumElts,
                          SmallVectorImpl<Register> &VRegs);

  /// Helper function to build a wide generic register \p DstReg of type
  /// \p RegTy from smaller parts. This will produce a G_MERGE_VALUES,
  /// G_BUILD_VECTOR, G_CONCAT_VECTORS, or sequence of G_INSERT as appropriate
  /// for the types.
  ///
  /// \p PartRegs must be registers of type \p PartTy.
  ///
  /// If \p ResultTy does not evenly break into \p PartTy sized pieces, the
  /// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy.
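  ///
  /// For example (illustrative register names), rebuilding an s64 destination
  /// from the four s16 pieces produced by the extractParts sketch above:
  /// \code
  ///   insertParts(Dst64, LLT::scalar(64), LLT::scalar(16), Parts);
  /// \endcode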
  void insertParts(Register DstReg, LLT ResultTy,
                   LLT PartTy, ArrayRef<Register> PartRegs,
                   LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});

  /// Merge \p PartRegs with different types into \p DstReg.
  void mergeMixedSubvectors(Register DstReg, ArrayRef<Register> PartRegs);

  void appendVectorElts(SmallVectorImpl<Register> &Elts, Register Reg);

  /// Unmerge \p SrcReg into smaller sized values, and append them to \p Parts.
  /// The elements of \p Parts will be the greatest common divisor type of
  /// \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
  /// return the GCD type.
  LLT extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
                     LLT NarrowTy, Register SrcReg);
  /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
  /// the unpacked registers to \p Parts. This version is used when the common
  /// unmerge type is already known.
  void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
                      Register SrcReg);

  /// Produce a merge of values in \p VRegs to define \p DstReg. Perform a
  /// merge from the least common multiple type, and convert as appropriate to
  /// \p DstReg.
  ///
  /// \p VRegs should each have type \p GCDTy. This type should be the greatest
  /// common divisor type of \p DstReg, \p NarrowTy, and an undetermined source
  /// type.
  ///
  /// \p NarrowTy is the desired result merge source type. If the source value
  /// needs to be widened to evenly cover \p DstReg, inserts high bits
  /// corresponding to the extension opcode \p PadStrategy.
  ///
  /// \p VRegs will be cleared, and the result \p NarrowTy register pieces
  /// will replace its contents. Returns the complete LCMTy that \p VRegs will
  /// cover when merged.
  LLT buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
                          SmallVectorImpl<Register> &VRegs,
                          unsigned PadStrategy = TargetOpcode::G_ANYEXT);

  /// Merge the values in \p RemergeRegs to an \p LCMTy typed value. Extract the
  /// low bits into \p DstReg. This is intended to use the outputs from
  /// buildLCMMergePieces after processing.
  void buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
                                ArrayRef<Register> RemergeRegs);

  /// Perform generic multiplication of values held in multiple registers.
  /// Generated instructions use only types NarrowTy and i1.
  /// The destination can be the same size as the source or twice its size.
  void multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
                         ArrayRef<Register> Src1Regs,
                         ArrayRef<Register> Src2Regs, LLT NarrowTy);

  void changeOpcode(MachineInstr &MI, unsigned NewOpcode);

  LegalizeResult tryNarrowPow2Reduction(MachineInstr &MI, Register SrcReg,
                                        LLT SrcTy, LLT NarrowTy,
                                        unsigned ScalarOpc);

  // Memcpy family legalization helpers.
  LegalizeResult lowerMemset(MachineInstr &MI, Register Dst, Register Val,
                             uint64_t KnownLen, Align Alignment,
                             bool IsVolatile);
  LegalizeResult lowerMemcpyInline(MachineInstr &MI, Register Dst, Register Src,
                                   uint64_t KnownLen, Align DstAlign,
                                   Align SrcAlign, bool IsVolatile);
  LegalizeResult lowerMemcpy(MachineInstr &MI, Register Dst, Register Src,
                             uint64_t KnownLen, uint64_t Limit, Align DstAlign,
                             Align SrcAlign, bool IsVolatile);
  LegalizeResult lowerMemmove(MachineInstr &MI, Register Dst, Register Src,
                              uint64_t KnownLen, Align DstAlign, Align SrcAlign,
                              bool IsVolatile);

  // Implements floating-point environment read/write via library function call.
  LegalizeResult createGetStateLibcall(MachineIRBuilder &MIRBuilder,
                                       MachineInstr &MI,
                                       LostDebugLocObserver &LocObserver);
  LegalizeResult createSetStateLibcall(MachineIRBuilder &MIRBuilder,
                                       MachineInstr &MI,
                                       LostDebugLocObserver &LocObserver);
  LegalizeResult createResetStateLibcall(MachineIRBuilder &MIRBuilder,
                                         MachineInstr &MI,
                                         LostDebugLocObserver &LocObserver);

public:
  /// Return the alignment to use for a stack temporary object with the given
  /// type.
  Align getStackTemporaryAlignment(LLT Type, Align MinAlign = Align()) const;

  /// Create a stack temporary based on the size in bytes and the alignment
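  ///
  /// A typical use, assuming a 16-byte temporary is wanted (the size and
  /// alignment are illustrative):
  /// \code
  ///   MachinePointerInfo PtrInfo;
  ///   auto StackTemp =
  ///       createStackTemporary(TypeSize::getFixed(16), Align(16), PtrInfo);
  /// \endcode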
  MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment,
                                           MachinePointerInfo &PtrInfo);

  /// Get a pointer to vector element \p Index located in memory for a vector of
  /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
  /// of bounds the returned pointer is unspecified, but will be within the
  /// vector bounds.
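  ///
  /// For example (illustrative register names), addressing an element of a
  /// <4 x s32> vector that lives in memory at \c VecPtr:
  /// \code
  ///   Register EltPtr =
  ///       getVectorElementPointer(VecPtr, LLT::fixed_vector(4, 32), IdxReg);
  /// \endcode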
  Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);

  /// Handles most opcodes. Splits \p MI into the same instruction operating on
  /// sub-vectors or scalars with \p NumElts elements (1 for a scalar).
  /// Supports uneven splits: there can be a leftover sub-vector with fewer
  /// than \p NumElts elements, or a leftover scalar. To avoid this, use
  /// moreElements first and make the number of elements in \p MI a multiple of
  /// \p NumElts. Non-vector operands that should be used unsplit on all
  /// sub-instructions are listed in \p NonVecOpIndices.
  LegalizeResult fewerElementsVectorMultiEltType(
      GenericMachineInstr &MI, unsigned NumElts,
      std::initializer_list<unsigned> NonVecOpIndices = {});

  LegalizeResult fewerElementsVectorPhi(GenericMachineInstr &MI,
                                        unsigned NumElts);

  LegalizeResult moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
                                       LLT MoreTy);
  LegalizeResult moreElementsVectorShuffle(MachineInstr &MI, unsigned TypeIdx,
                                           LLT MoreTy);

  LegalizeResult fewerElementsVectorUnmergeValues(MachineInstr &MI,
                                                  unsigned TypeIdx,
                                                  LLT NarrowTy);
  LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
                                          LLT NarrowTy);
  LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI,
                                                           unsigned TypeIdx,
                                                           LLT NarrowTy);

  /// Equalize source and destination vector sizes of G_SHUFFLE_VECTOR.
  LegalizeResult equalizeVectorShuffleLengths(MachineInstr &MI);

  LegalizeResult reduceLoadStoreWidth(GLoadStore &MI, unsigned TypeIdx,
                                      LLT NarrowTy);

  LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                             LLT HalfTy, LLT ShiftAmtTy);

  LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
                                               unsigned TypeIdx, LLT NarrowTy);
  LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI,
                                                  unsigned TypeIdx,
                                                  LLT NarrowTy);

  LegalizeResult fewerElementsVectorShuffle(MachineInstr &MI, unsigned TypeIdx,
                                            LLT NarrowTy);

  LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarAddSub(MachineInstr &MI, unsigned TypeIdx,
                                    LLT NarrowTy);
  LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
  LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT Ty);

  LegalizeResult narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarExt(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarFLDEXP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);

  /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
  LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx,
                                         LLT CastTy);

  /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
  LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx,
                                        LLT CastTy);

  LegalizeResult lowerFConstant(MachineInstr &MI);
  LegalizeResult lowerBitcast(MachineInstr &MI);
  LegalizeResult lowerLoad(GAnyLoad &MI);
  LegalizeResult lowerStore(GStore &MI);
  LegalizeResult lowerBitCount(MachineInstr &MI);
  LegalizeResult lowerFunnelShiftWithInverse(MachineInstr &MI);
  LegalizeResult lowerFunnelShiftAsShifts(MachineInstr &MI);
  LegalizeResult lowerFunnelShift(MachineInstr &MI);
  LegalizeResult lowerEXT(MachineInstr &MI);
  LegalizeResult lowerTRUNC(MachineInstr &MI);
  LegalizeResult lowerRotateWithReverseRotate(MachineInstr &MI);
  LegalizeResult lowerRotate(MachineInstr &MI);

  LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
  LegalizeResult lowerUITOFP(MachineInstr &MI);
  LegalizeResult lowerSITOFP(MachineInstr &MI);
  LegalizeResult lowerFPTOUI(MachineInstr &MI);
  LegalizeResult lowerFPTOSI(MachineInstr &MI);

  LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI);
  LegalizeResult lowerFPTRUNC(MachineInstr &MI);
  LegalizeResult lowerFPOWI(MachineInstr &MI);

  LegalizeResult lowerISFPCLASS(MachineInstr &MI);

  LegalizeResult lowerMinMax(MachineInstr &MI);
  LegalizeResult lowerFCopySign(MachineInstr &MI);
  LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
  LegalizeResult lowerFMad(MachineInstr &MI);
  LegalizeResult lowerIntrinsicRound(MachineInstr &MI);
  LegalizeResult lowerFFloor(MachineInstr &MI);
  LegalizeResult lowerMergeValues(MachineInstr &MI);
  LegalizeResult lowerUnmergeValues(MachineInstr &MI);
  LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
  LegalizeResult lowerShuffleVector(MachineInstr &MI);
  Register getDynStackAllocTargetPtr(Register SPReg, Register AllocSize,
                                     Align Alignment, LLT PtrTy);
  LegalizeResult lowerDynStackAlloc(MachineInstr &MI);
  LegalizeResult lowerStackSave(MachineInstr &MI);
  LegalizeResult lowerStackRestore(MachineInstr &MI);
  LegalizeResult lowerExtract(MachineInstr &MI);
  LegalizeResult lowerInsert(MachineInstr &MI);
  LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI);
  LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
  LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
  LegalizeResult lowerShlSat(MachineInstr &MI);
  LegalizeResult lowerBswap(MachineInstr &MI);
  LegalizeResult lowerBitreverse(MachineInstr &MI);
  LegalizeResult lowerReadWriteRegister(MachineInstr &MI);
  LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
  LegalizeResult lowerSelect(MachineInstr &MI);
  LegalizeResult lowerDIVREM(MachineInstr &MI);
  LegalizeResult lowerAbsToAddXor(MachineInstr &MI);
  LegalizeResult lowerAbsToMaxNeg(MachineInstr &MI);
  LegalizeResult lowerVectorReduction(MachineInstr &MI);
  LegalizeResult lowerMemcpyInline(MachineInstr &MI);
  LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
  LegalizeResult lowerVAArg(MachineInstr &MI);
};

/// Helper function that creates a libcall to the given \p Name using the given
/// calling convention \p CC.
LegalizerHelper::LegalizeResult
createLibcall(MachineIRBuilder &MIRBuilder, const char *Name,
              const CallLowering::ArgInfo &Result,
              ArrayRef<CallLowering::ArgInfo> Args, CallingConv::ID CC,
              LostDebugLocObserver &LocObserver, MachineInstr *MI = nullptr);

/// Helper function that creates the given libcall.
LegalizerHelper::LegalizeResult
createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
              const CallLowering::ArgInfo &Result,
              ArrayRef<CallLowering::ArgInfo> Args,
              LostDebugLocObserver &LocObserver, MachineInstr *MI = nullptr);

/// Create a libcall to memcpy et al.
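///
/// An illustrative sketch (not the only usage pattern), assuming \c MIRBuilder,
/// \c MI, and \c LocObserver are in scope: turning a G_MEMCPY that must become
/// a call into a libcall and then deleting the original instruction:
/// \code
///   LegalizerHelper::LegalizeResult Res =
///       createMemLibcall(MIRBuilder, *MIRBuilder.getMRI(), MI, LocObserver);
///   if (Res == LegalizerHelper::Legalized)
///     MI.eraseFromParent();
/// \endcode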
LegalizerHelper::LegalizeResult
createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                 MachineInstr &MI, LostDebugLocObserver &LocObserver);


} // End namespace llvm.

#endif