// xref: /freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h (revision 700637cbb5e582861067a11aaca4d053546871d2)
//== llvm/CodeGen/GlobalISel/LegalizerHelper.h ---------------- -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file A pass to convert the target-illegal operations created by IR -> MIR
/// translation into ones the target expects to be able to select. This may
/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
/// G_ADD <4 x i16>.
///
/// The LegalizerHelper class is where most of the work happens, and is
/// designed to be callable from other passes that find themselves with an
/// illegal instruction.
//
//===----------------------------------------------------------------------===//
19 
20 #ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZERHELPER_H
21 #define LLVM_CODEGEN_GLOBALISEL_LEGALIZERHELPER_H
22 
23 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
24 #include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/RuntimeLibcallUtil.h"
27 #include "llvm/CodeGen/TargetOpcodes.h"
28 #include "llvm/Support/Compiler.h"
29 
30 namespace llvm {
31 // Forward declarations.
32 class APInt;
33 class GAnyLoad;
34 class GLoadStore;
35 class GStore;
36 class GenericMachineInstr;
37 class MachineFunction;
38 class MachineIRBuilder;
39 class MachineInstr;
40 class MachineInstrBuilder;
41 struct MachinePointerInfo;
42 template <typename T> class SmallVectorImpl;
43 class LegalizerInfo;
44 class MachineRegisterInfo;
45 class GISelChangeObserver;
46 class LostDebugLocObserver;
47 class TargetLowering;
48 
49 class LegalizerHelper {
50 public:
51   /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
52   /// functions
53   MachineIRBuilder &MIRBuilder;
54 
55   /// To keep track of changes made by the LegalizerHelper.
56   GISelChangeObserver &Observer;
57 
58 private:
59   MachineRegisterInfo &MRI;
60   const LegalizerInfo &LI;
61   const TargetLowering &TLI;
62   GISelValueTracking *VT;
63 
64 public:
65   enum LegalizeResult {
66     /// Instruction was already legal and no change was made to the
67     /// MachineFunction.
68     AlreadyLegal,
69 
70     /// Instruction has been legalized and the MachineFunction changed.
71     Legalized,
72 
73     /// Some kind of error has occurred and we could not legalize this
74     /// instruction.
75     UnableToLegalize,
76   };
77 
78   /// Expose LegalizerInfo so the clients can re-use.
getLegalizerInfo()79   const LegalizerInfo &getLegalizerInfo() const { return LI; }
getTargetLowering()80   const TargetLowering &getTargetLowering() const { return TLI; }
getValueTracking()81   GISelValueTracking *getValueTracking() const { return VT; }
82 
83   LLVM_ABI LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
84                            MachineIRBuilder &B);
85   LLVM_ABI LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
86                            GISelChangeObserver &Observer, MachineIRBuilder &B,
87                            GISelValueTracking *VT = nullptr);
88 
89   /// Replace \p MI by a sequence of legal instructions that can implement the
90   /// same operation. Note that this means \p MI may be deleted, so any iterator
91   /// steps should be performed before calling this function. \p Helper should
92   /// be initialized to the MachineFunction containing \p MI.
93   ///
94   /// Considered as an opaque blob, the legal code will use and define the same
95   /// registers as \p MI.
96   LLVM_ABI LegalizeResult legalizeInstrStep(MachineInstr &MI,
97                                             LostDebugLocObserver &LocObserver);
98 
99   /// Legalize an instruction by emiting a runtime library call instead.
100   LLVM_ABI LegalizeResult libcall(MachineInstr &MI,
101                                   LostDebugLocObserver &LocObserver);
102 
103   /// Legalize an instruction by reducing the width of the underlying scalar
104   /// type.
105   LLVM_ABI LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx,
106                                        LLT NarrowTy);
107 
108   /// Legalize an instruction by performing the operation on a wider scalar type
109   /// (for example a 16-bit addition can be safely performed at 32-bits
110   /// precision, ignoring the unused bits).
111   LLVM_ABI LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx,
112                                       LLT WideTy);
113 
114   /// Legalize an instruction by replacing the value type
115   LLVM_ABI LegalizeResult bitcast(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
116 
117   /// Legalize an instruction by splitting it into simpler parts, hopefully
118   /// understood by the target.
119   LLVM_ABI LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
120 
121   /// Legalize a vector instruction by splitting into multiple components, each
122   /// acting on the same scalar type as the original but with fewer elements.
123   LLVM_ABI LegalizeResult fewerElementsVector(MachineInstr &MI,
124                                               unsigned TypeIdx, LLT NarrowTy);
125 
126   /// Legalize a vector instruction by increasing the number of vector elements
127   /// involved and ignoring the added elements later.
128   LLVM_ABI LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
129                                              LLT MoreTy);
130 
131   /// Cast the given value to an LLT::scalar with an equivalent size. Returns
132   /// the register to use if an instruction was inserted. Returns the original
133   /// register if no coercion was necessary.
134   //
135   // This may also fail and return Register() if there is no legal way to cast.
136   LLVM_ABI Register coerceToScalar(Register Val);
137 
138   /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
139   /// Use by extending the operand's type to \p WideTy using the specified \p
140   /// ExtOpcode for the extension instruction, and replacing the vreg of the
141   /// operand in place.
142   LLVM_ABI void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx,
143                                unsigned ExtOpcode);
144 
145   /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
146   /// Use by truncating the operand's type to \p NarrowTy using G_TRUNC, and
147   /// replacing the vreg of the operand in place.
148   LLVM_ABI void narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx);
149 
150   /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
151   /// Def by extending the operand's type to \p WideTy and truncating it back
152   /// with the \p TruncOpcode, and replacing the vreg of the operand in place.
153   LLVM_ABI void widenScalarDst(MachineInstr &MI, LLT WideTy, unsigned OpIdx = 0,
154                                unsigned TruncOpcode = TargetOpcode::G_TRUNC);
155 
156   // Legalize a single operand \p OpIdx of the machine instruction \p MI as a
157   // Def by truncating the operand's type to \p NarrowTy, replacing in place and
158   // extending back with \p ExtOpcode.
159   LLVM_ABI void narrowScalarDst(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx,
160                                 unsigned ExtOpcode);
161   /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
162   /// Def by performing it with additional vector elements and extracting the
163   /// result elements, and replacing the vreg of the operand in place.
164   LLVM_ABI void moreElementsVectorDst(MachineInstr &MI, LLT MoreTy,
165                                       unsigned OpIdx);
166 
167   /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
168   /// Use by producing a vector with undefined high elements, extracting the
169   /// original vector type, and replacing the vreg of the operand in place.
170   LLVM_ABI void moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy,
171                                       unsigned OpIdx);
172 
173   /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
174   /// use by inserting a G_BITCAST to \p CastTy
175   LLVM_ABI void bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx);
176 
177   /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
178   /// def by inserting a G_BITCAST from \p CastTy
179   LLVM_ABI void bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx);
180 
181 private:
182   LegalizeResult
183   widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
184   LegalizeResult
185   widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
186   LegalizeResult
187   widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
188   LegalizeResult
189   widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
190   LegalizeResult widenScalarAddSubOverflow(MachineInstr &MI, unsigned TypeIdx,
191                                            LLT WideTy);
192   LegalizeResult widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
193                                          LLT WideTy);
194   LegalizeResult widenScalarMulo(MachineInstr &MI, unsigned TypeIdx,
195                                  LLT WideTy);
196 
197   /// Helper function to build a wide generic register \p DstReg of type \p
198   /// RegTy from smaller parts. This will produce a G_MERGE_VALUES,
199   /// G_BUILD_VECTOR, G_CONCAT_VECTORS, or sequence of G_INSERT as appropriate
200   /// for the types.
201   ///
202   /// \p PartRegs must be registers of type \p PartTy.
203   ///
204   /// If \p ResultTy does not evenly break into \p PartTy sized pieces, the
205   /// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy.
206   void insertParts(Register DstReg, LLT ResultTy,
207                    LLT PartTy, ArrayRef<Register> PartRegs,
208                    LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
209 
210   /// Merge \p PartRegs with different types into \p DstReg.
211   void mergeMixedSubvectors(Register DstReg, ArrayRef<Register> PartRegs);
212 
213   void appendVectorElts(SmallVectorImpl<Register> &Elts, Register Reg);
214 
215   /// Unmerge \p SrcReg into smaller sized values, and append them to \p
216   /// Parts. The elements of \p Parts will be the greatest common divisor type
217   /// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
218   /// return the GCD type.
219   LLT extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
220                      LLT NarrowTy, Register SrcReg);
221 
222   /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
223   /// the unpacked registers to \p Parts. This version is if the common unmerge
224   /// type is already known.
225   void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
226                       Register SrcReg);
227 
228   /// Produce a merge of values in \p VRegs to define \p DstReg. Perform a merge
229   /// from the least common multiple type, and convert as appropriate to \p
230   /// DstReg.
231   ///
232   /// \p VRegs should each have type \p GCDTy. This type should be greatest
233   /// common divisor type of \p DstReg, \p NarrowTy, and an undetermined source
234   /// type.
235   ///
236   /// \p NarrowTy is the desired result merge source type. If the source value
237   /// needs to be widened to evenly cover \p DstReg, inserts high bits
238   /// corresponding to the extension opcode \p PadStrategy.
239   ///
240   /// \p VRegs will be cleared, and the result \p NarrowTy register pieces
241   /// will replace it. Returns The complete LCMTy that \p VRegs will cover when
242   /// merged.
243   LLT buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
244                           SmallVectorImpl<Register> &VRegs,
245                           unsigned PadStrategy = TargetOpcode::G_ANYEXT);
246 
247   /// Merge the values in \p RemergeRegs to an \p LCMTy typed value. Extract the
248   /// low bits into \p DstReg. This is intended to use the outputs from
249   /// buildLCMMergePieces after processing.
250   void buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
251                                 ArrayRef<Register> RemergeRegs);
252 
253   /// Perform generic multiplication of values held in multiple registers.
254   /// Generated instructions use only types NarrowTy and i1.
255   /// Destination can be same or two times size of the source.
256   void multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
257                          ArrayRef<Register> Src1Regs,
258                          ArrayRef<Register> Src2Regs, LLT NarrowTy);
259 
260   void changeOpcode(MachineInstr &MI, unsigned NewOpcode);
261 
262   LegalizeResult tryNarrowPow2Reduction(MachineInstr &MI, Register SrcReg,
263                                         LLT SrcTy, LLT NarrowTy,
264                                         unsigned ScalarOpc);
265 
266   // Memcpy family legalization helpers.
267   LegalizeResult lowerMemset(MachineInstr &MI, Register Dst, Register Val,
268                              uint64_t KnownLen, Align Alignment,
269                              bool IsVolatile);
270   LegalizeResult lowerMemcpyInline(MachineInstr &MI, Register Dst, Register Src,
271                                    uint64_t KnownLen, Align DstAlign,
272                                    Align SrcAlign, bool IsVolatile);
273   LegalizeResult lowerMemcpy(MachineInstr &MI, Register Dst, Register Src,
274                              uint64_t KnownLen, uint64_t Limit, Align DstAlign,
275                              Align SrcAlign, bool IsVolatile);
276   LegalizeResult lowerMemmove(MachineInstr &MI, Register Dst, Register Src,
277                               uint64_t KnownLen, Align DstAlign, Align SrcAlign,
278                               bool IsVolatile);
279 
280   // Implements floating-point environment read/write via library function call.
281   LegalizeResult createGetStateLibcall(MachineIRBuilder &MIRBuilder,
282                                        MachineInstr &MI,
283                                        LostDebugLocObserver &LocObserver);
284   LegalizeResult createSetStateLibcall(MachineIRBuilder &MIRBuilder,
285                                        MachineInstr &MI,
286                                        LostDebugLocObserver &LocObserver);
287   LegalizeResult createResetStateLibcall(MachineIRBuilder &MIRBuilder,
288                                          MachineInstr &MI,
289                                          LostDebugLocObserver &LocObserver);
290   LegalizeResult createFCMPLibcall(MachineIRBuilder &MIRBuilder,
291                                    MachineInstr &MI,
292                                    LostDebugLocObserver &LocObserver);
293 
294   MachineInstrBuilder
295   getNeutralElementForVecReduce(unsigned Opcode, MachineIRBuilder &MIRBuilder,
296                                 LLT Ty);
297 
298   LegalizeResult emitSincosLibcall(MachineInstr &MI,
299                                    MachineIRBuilder &MIRBuilder, unsigned Size,
300                                    Type *OpType,
301                                    LostDebugLocObserver &LocObserver);
302 
303 public:
304   /// Return the alignment to use for a stack temporary object with the given
305   /// type.
306   LLVM_ABI Align getStackTemporaryAlignment(LLT Type,
307                                             Align MinAlign = Align()) const;
308 
309   /// Create a stack temporary based on the size in bytes and the alignment
310   LLVM_ABI MachineInstrBuilder createStackTemporary(
311       TypeSize Bytes, Align Alignment, MachinePointerInfo &PtrInfo);
312 
313   /// Create a store of \p Val to a stack temporary and return a load as the
314   /// same type as \p Res.
315   LLVM_ABI MachineInstrBuilder createStackStoreLoad(const DstOp &Res,
316                                                     const SrcOp &Val);
317 
318   /// Given a store of a boolean vector, scalarize it.
319   LLVM_ABI LegalizeResult scalarizeVectorBooleanStore(GStore &MI);
320 
321   /// Get a pointer to vector element \p Index located in memory for a vector of
322   /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
323   /// of bounds the returned pointer is unspecified, but will be within the
324   /// vector bounds.
325   LLVM_ABI Register getVectorElementPointer(Register VecPtr, LLT VecTy,
326                                             Register Index);
327 
328   /// Handles most opcodes. Split \p MI into same instruction on sub-vectors or
329   /// scalars with \p NumElts elements (1 for scalar). Supports uneven splits:
330   /// there can be leftover sub-vector with fewer then \p NumElts or a leftover
331   /// scalar. To avoid this use moreElements first and set MI number of elements
332   /// to multiple of \p NumElts. Non-vector operands that should be used on all
333   /// sub-instructions without split are listed in \p NonVecOpIndices.
334   LLVM_ABI LegalizeResult fewerElementsVectorMultiEltType(
335       GenericMachineInstr &MI, unsigned NumElts,
336       std::initializer_list<unsigned> NonVecOpIndices = {});
337 
338   LLVM_ABI LegalizeResult fewerElementsVectorPhi(GenericMachineInstr &MI,
339                                                  unsigned NumElts);
340 
341   LLVM_ABI LegalizeResult moreElementsVectorPhi(MachineInstr &MI,
342                                                 unsigned TypeIdx, LLT MoreTy);
343   LLVM_ABI LegalizeResult moreElementsVectorShuffle(MachineInstr &MI,
344                                                     unsigned TypeIdx,
345                                                     LLT MoreTy);
346 
347   LLVM_ABI LegalizeResult fewerElementsVectorUnmergeValues(MachineInstr &MI,
348                                                            unsigned TypeIdx,
349                                                            LLT NarrowTy);
350   LLVM_ABI LegalizeResult fewerElementsVectorMerge(MachineInstr &MI,
351                                                    unsigned TypeIdx,
352                                                    LLT NarrowTy);
353   LLVM_ABI LegalizeResult fewerElementsVectorExtractInsertVectorElt(
354       MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
355 
356   /// Equalize source and destination vector sizes of G_SHUFFLE_VECTOR.
357   LLVM_ABI LegalizeResult equalizeVectorShuffleLengths(MachineInstr &MI);
358 
359   LLVM_ABI LegalizeResult reduceLoadStoreWidth(GLoadStore &MI, unsigned TypeIdx,
360                                                LLT NarrowTy);
361 
362   LLVM_ABI LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI,
363                                                       const APInt &Amt,
364                                                       LLT HalfTy,
365                                                       LLT ShiftAmtTy);
366 
367   LLVM_ABI LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
368                                                         unsigned TypeIdx,
369                                                         LLT NarrowTy);
370   LLVM_ABI LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI,
371                                                            unsigned TypeIdx,
372                                                            LLT NarrowTy);
373 
374   // Fewer Elements for bitcast, ensuring that the size of the Src and Dst
375   // registers will be the same
376   LLVM_ABI LegalizeResult fewerElementsBitcast(MachineInstr &MI,
377                                                unsigned TypeIdx, LLT NarrowTy);
378 
379   LLVM_ABI LegalizeResult fewerElementsVectorShuffle(MachineInstr &MI,
380                                                      unsigned TypeIdx,
381                                                      LLT NarrowTy);
382 
383   LLVM_ABI LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
384                                             LLT Ty);
385   LLVM_ABI LegalizeResult narrowScalarAddSub(MachineInstr &MI, unsigned TypeIdx,
386                                              LLT NarrowTy);
387   LLVM_ABI LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
388   LLVM_ABI LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx,
389                                             LLT Ty);
390   LLVM_ABI LegalizeResult narrowScalarExtract(MachineInstr &MI,
391                                               unsigned TypeIdx, LLT Ty);
392   LLVM_ABI LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx,
393                                              LLT Ty);
394 
395   LLVM_ABI LegalizeResult narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
396                                             LLT Ty);
397   LLVM_ABI LegalizeResult narrowScalarExt(MachineInstr &MI, unsigned TypeIdx,
398                                           LLT Ty);
399   LLVM_ABI LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx,
400                                              LLT Ty);
401   LLVM_ABI LegalizeResult narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx,
402                                            LLT Ty);
403   LLVM_ABI LegalizeResult narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx,
404                                            LLT Ty);
405   LLVM_ABI LegalizeResult narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx,
406                                             LLT Ty);
407   LLVM_ABI LegalizeResult narrowScalarFLDEXP(MachineInstr &MI, unsigned TypeIdx,
408                                              LLT Ty);
409 
410   /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
411   LLVM_ABI LegalizeResult bitcastExtractVectorElt(MachineInstr &MI,
412                                                   unsigned TypeIdx, LLT CastTy);
413 
414   /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
415   LLVM_ABI LegalizeResult bitcastInsertVectorElt(MachineInstr &MI,
416                                                  unsigned TypeIdx, LLT CastTy);
417   LLVM_ABI LegalizeResult bitcastConcatVector(MachineInstr &MI,
418                                               unsigned TypeIdx, LLT CastTy);
419   LLVM_ABI LegalizeResult bitcastShuffleVector(MachineInstr &MI,
420                                                unsigned TypeIdx, LLT CastTy);
421   LLVM_ABI LegalizeResult bitcastExtractSubvector(MachineInstr &MI,
422                                                   unsigned TypeIdx, LLT CastTy);
423   LLVM_ABI LegalizeResult bitcastInsertSubvector(MachineInstr &MI,
424                                                  unsigned TypeIdx, LLT CastTy);
425 
426   LLVM_ABI LegalizeResult lowerConstant(MachineInstr &MI);
427   LLVM_ABI LegalizeResult lowerFConstant(MachineInstr &MI);
428   LLVM_ABI LegalizeResult lowerBitcast(MachineInstr &MI);
429   LLVM_ABI LegalizeResult lowerLoad(GAnyLoad &MI);
430   LLVM_ABI LegalizeResult lowerStore(GStore &MI);
431   LLVM_ABI LegalizeResult lowerBitCount(MachineInstr &MI);
432   LLVM_ABI LegalizeResult lowerFunnelShiftWithInverse(MachineInstr &MI);
433   LLVM_ABI LegalizeResult lowerFunnelShiftAsShifts(MachineInstr &MI);
434   LLVM_ABI LegalizeResult lowerFunnelShift(MachineInstr &MI);
435   LLVM_ABI LegalizeResult lowerEXT(MachineInstr &MI);
436   LLVM_ABI LegalizeResult lowerTRUNC(MachineInstr &MI);
437   LLVM_ABI LegalizeResult lowerRotateWithReverseRotate(MachineInstr &MI);
438   LLVM_ABI LegalizeResult lowerRotate(MachineInstr &MI);
439 
440   LLVM_ABI LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
441   LLVM_ABI LegalizeResult lowerU64ToF32WithSITOFP(MachineInstr &MI);
442   LLVM_ABI LegalizeResult lowerU64ToF64BitFloatOps(MachineInstr &MI);
443   LLVM_ABI LegalizeResult lowerUITOFP(MachineInstr &MI);
444   LLVM_ABI LegalizeResult lowerSITOFP(MachineInstr &MI);
445   LLVM_ABI LegalizeResult lowerFPTOUI(MachineInstr &MI);
446   LLVM_ABI LegalizeResult lowerFPTOSI(MachineInstr &MI);
447   LLVM_ABI LegalizeResult lowerFPTOINT_SAT(MachineInstr &MI);
448 
449   LLVM_ABI LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI);
450   LLVM_ABI LegalizeResult lowerFPTRUNC(MachineInstr &MI);
451   LLVM_ABI LegalizeResult lowerFPOWI(MachineInstr &MI);
452 
453   LLVM_ABI LegalizeResult lowerISFPCLASS(MachineInstr &MI);
454 
455   LLVM_ABI LegalizeResult lowerThreewayCompare(MachineInstr &MI);
456   LLVM_ABI LegalizeResult lowerMinMax(MachineInstr &MI);
457   LLVM_ABI LegalizeResult lowerFCopySign(MachineInstr &MI);
458   LLVM_ABI LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
459   LLVM_ABI LegalizeResult lowerFMad(MachineInstr &MI);
460   LLVM_ABI LegalizeResult lowerIntrinsicRound(MachineInstr &MI);
461   LLVM_ABI LegalizeResult lowerFFloor(MachineInstr &MI);
462   LLVM_ABI LegalizeResult lowerMergeValues(MachineInstr &MI);
463   LLVM_ABI LegalizeResult lowerUnmergeValues(MachineInstr &MI);
464   LLVM_ABI LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
465   LLVM_ABI LegalizeResult lowerShuffleVector(MachineInstr &MI);
466   LLVM_ABI LegalizeResult lowerVECTOR_COMPRESS(MachineInstr &MI);
467   LLVM_ABI Register getDynStackAllocTargetPtr(Register SPReg,
468                                               Register AllocSize,
469                                               Align Alignment, LLT PtrTy);
470   LLVM_ABI LegalizeResult lowerDynStackAlloc(MachineInstr &MI);
471   LLVM_ABI LegalizeResult lowerStackSave(MachineInstr &MI);
472   LLVM_ABI LegalizeResult lowerStackRestore(MachineInstr &MI);
473   LLVM_ABI LegalizeResult lowerExtract(MachineInstr &MI);
474   LLVM_ABI LegalizeResult lowerInsert(MachineInstr &MI);
475   LLVM_ABI LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI);
476   LLVM_ABI LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
477   LLVM_ABI LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
478   LLVM_ABI LegalizeResult lowerShlSat(MachineInstr &MI);
479   LLVM_ABI LegalizeResult lowerBswap(MachineInstr &MI);
480   LLVM_ABI LegalizeResult lowerBitreverse(MachineInstr &MI);
481   LLVM_ABI LegalizeResult lowerReadWriteRegister(MachineInstr &MI);
482   LLVM_ABI LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
483   LLVM_ABI LegalizeResult lowerSelect(MachineInstr &MI);
484   LLVM_ABI LegalizeResult lowerDIVREM(MachineInstr &MI);
485   LLVM_ABI LegalizeResult lowerAbsToAddXor(MachineInstr &MI);
486   LLVM_ABI LegalizeResult lowerAbsToMaxNeg(MachineInstr &MI);
487   LLVM_ABI LegalizeResult lowerAbsToCNeg(MachineInstr &MI);
488   LLVM_ABI LegalizeResult lowerFAbs(MachineInstr &MI);
489   LLVM_ABI LegalizeResult lowerVectorReduction(MachineInstr &MI);
490   LLVM_ABI LegalizeResult lowerMemcpyInline(MachineInstr &MI);
491   LLVM_ABI LegalizeResult lowerMemCpyFamily(MachineInstr &MI,
492                                             unsigned MaxLen = 0);
493   LLVM_ABI LegalizeResult lowerVAArg(MachineInstr &MI);
494 };
495 
496 /// Helper function that creates a libcall to the given \p Name using the given
497 /// calling convention \p CC.
498 LLVM_ABI LegalizerHelper::LegalizeResult
499 createLibcall(MachineIRBuilder &MIRBuilder, const char *Name,
500               const CallLowering::ArgInfo &Result,
501               ArrayRef<CallLowering::ArgInfo> Args, CallingConv::ID CC,
502               LostDebugLocObserver &LocObserver, MachineInstr *MI = nullptr);
503 
504 /// Helper function that creates the given libcall.
505 LLVM_ABI LegalizerHelper::LegalizeResult
506 createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
507               const CallLowering::ArgInfo &Result,
508               ArrayRef<CallLowering::ArgInfo> Args,
509               LostDebugLocObserver &LocObserver, MachineInstr *MI = nullptr);
510 
511 /// Create a libcall to memcpy et al.
512 LLVM_ABI LegalizerHelper::LegalizeResult
513 createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
514                  MachineInstr &MI, LostDebugLocObserver &LocObserver);
515 
516 } // End namespace llvm.
517 
518 #endif
519