//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===--------------------------------------------------------------------===//
/// \file
/// This contains common combine transformations that may be used in a combine
/// pass, or by the target elsewhere.
/// Targets can pick individual opcode transformations from the helper or use
/// tryCombine which invokes all transformations. All of the transformations
/// return true if the MachineInstr changed and false otherwise.
///
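/// For illustration, a minimal sketch of how a target-specific combiner
/// implementation might drive this helper (the class and member names below
/// are hypothetical, not part of this interface):
/// \code
///   bool MyTargetCombinerImpl::tryCombineAll(MachineInstr &MI) const {
///     CombinerHelper Helper(Observer, B, /*IsPreLegalize=*/true, KB, MDT, LI);
///     return Helper.tryCombine(MI);
///   }
/// \endcode
///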
//===--------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/InstrTypes.h"
#include <functional>

namespace llvm {

class GISelChangeObserver;
class APInt;
class ConstantFP;
class GPtrAdd;
class GZExtLoad;
class MachineIRBuilder;
class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
class GISelKnownBits;
class MachineDominatorTree;
class LegalizerInfo;
struct LegalityQuery;
class RegisterBank;
class RegisterBankInfo;
class TargetLowering;
class TargetRegisterInfo;

struct PreferredTuple {
  LLT Ty;                // The result type of the extend.
  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
  MachineInstr *MI;
};

struct IndexedLoadStoreMatchInfo {
  Register Addr;
  Register Base;
  Register Offset;
  bool RematOffset = false; // True if Offset is a constant that needs to be
                            // rematerialized before the new load/store.
  bool IsPre = false;
};

struct PtrAddChain {
  int64_t Imm;
  Register Base;
  const RegisterBank *Bank;
};

struct RegisterImmPair {
  Register Reg;
  int64_t Imm;
};

struct ShiftOfShiftedLogic {
  MachineInstr *Logic;
  MachineInstr *Shift2;
  Register LogicNonShiftReg;
  uint64_t ValSum;
};

using BuildFnTy = std::function<void(MachineIRBuilder &)>;

using OperandBuildSteps =
    SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
struct InstructionBuildSteps {
  unsigned Opcode = 0;          ///< The opcode for the produced instruction.
  OperandBuildSteps OperandFns; ///< Operands to be added to the instruction.
  InstructionBuildSteps() = default;
  InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
      : Opcode(Opcode), OperandFns(OperandFns) {}
};

struct InstructionStepsMatchInfo {
  /// Describes instructions to be built during a combine.
  SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
  InstructionStepsMatchInfo() = default;
  InstructionStepsMatchInfo(
      std::initializer_list<InstructionBuildSteps> InstrsToBuild)
      : InstrsToBuild(InstrsToBuild) {}
};

class CombinerHelper {
protected:
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  GISelChangeObserver &Observer;
  GISelKnownBits *KB;
  MachineDominatorTree *MDT;
  bool IsPreLegalize;
  const LegalizerInfo *LI;
  const RegisterBankInfo *RBI;
  const TargetRegisterInfo *TRI;

public:
  CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
                 bool IsPreLegalize,
                 GISelKnownBits *KB = nullptr,
                 MachineDominatorTree *MDT = nullptr,
                 const LegalizerInfo *LI = nullptr);

  GISelKnownBits *getKnownBits() const {
    return KB;
  }

  MachineIRBuilder &getBuilder() const {
    return Builder;
  }

  const TargetLowering &getTargetLowering() const;

  /// \returns true if the combiner is running pre-legalization.
  bool isPreLegalize() const;

  /// \returns true if \p Query is legal on the target.
  bool isLegal(const LegalityQuery &Query) const;

  /// \return true if the combine is running prior to legalization, or if \p
  /// Query is legal on the target.
  bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;

  /// \return true if the combine is running prior to legalization, or if \p Ty
  /// is a legal integer constant type on the target.
  bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;

  /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of the
  /// changes.
  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                      Register ToReg) const;

  /// Replace a single register operand with a new register and inform the
  /// observer of the changes.
  void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
                        Register ToReg) const;

  /// Replace the opcode in the instruction with a new opcode and inform the
  /// observer of the changes.
  void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;

  /// Get the register bank of \p Reg.
  /// If Reg has not been assigned a register, a register class,
  /// or a register bank, then this returns nullptr.
  ///
  /// \pre Reg.isValid()
  const RegisterBank *getRegBank(Register Reg) const;

  /// Set the register bank of \p Reg.
  /// Does nothing if the RegBank is null.
  /// This is the counterpart to getRegBank.
  void setRegBank(Register Reg, const RegisterBank *RegBank);

  /// If \p MI is a COPY, try to combine it.
  /// Returns true if MI changed.
  bool tryCombineCopy(MachineInstr &MI);
  bool matchCombineCopy(MachineInstr &MI);
  void applyCombineCopy(MachineInstr &MI);

  /// Returns true if \p DefMI precedes \p UseMI or they are the same
  /// instruction. Both must be in the same basic block.
  bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// Returns true if \p DefMI dominates \p UseMI. By definition an
  /// instruction dominates itself.
  ///
  /// If we haven't been provided with a MachineDominatorTree during
  /// construction, this function returns a conservative result that tracks
  /// just a single basic block.
  bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// If \p MI is an extend that consumes the result of a load, try to combine
  /// it. Returns true if MI changed.
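  ///
  /// A typical case (illustrative only):
  /// \code
  ///   %small:_(s8) = G_LOAD %ptr(p0) :: (load (s8))
  ///   %ext:_(s32) = G_ZEXT %small(s8)
  /// \endcode
  /// becomes
  /// \code
  ///   %ext:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
  /// \endcode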
  bool tryCombineExtendingLoads(MachineInstr &MI);
  bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
  void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);

  /// Match (and (load x), mask) -> zextload x
  bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed
  /// load.
  bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Try to fold a load/store addressed via G_PTR_ADD into a pre- or
  /// post-indexed load/store.
  ///
  /// For example (pre-indexed):
  ///
  ///     $addr = G_PTR_ADD $base, $offset
  ///     [...]
  ///     $val = G_LOAD $addr
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// -->
  ///
  ///     $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// or (post-indexed):
  ///
  ///     G_STORE $val, $base
  ///     [...]
  ///     $addr = G_PTR_ADD $base, $offset
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// -->
  ///
  ///     $addr = G_INDEXED_STORE $val, $base, $offset
  ///     [...]
  ///     $whatever = COPY $addr
  bool matchCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);
  void applyCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);

  bool matchSextTruncSextLoad(MachineInstr &MI);
  void applySextTruncSextLoad(MachineInstr &MI);

  /// Match sext_inreg(load p), imm -> sextload p
  bool matchSextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  void applySextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
  /// when their source operands are identical.
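  ///
  /// For example (illustrative):
  /// \code
  ///   %q:_(s32) = G_SDIV %a, %b
  ///   %r:_(s32) = G_SREM %a, %b
  /// \endcode
  /// becomes
  /// \code
  ///   %q:_(s32), %r:_(s32) = G_SDIVREM %a, %b
  /// \endcode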
  bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI);
  void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI);

  /// If a brcond's true block is not the fallthrough, make it so by inverting
  /// the condition and swapping operands.
  bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond);
  void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond);

  /// If \p MI is G_CONCAT_VECTORS, try to combine it.
  /// Returns true if MI changed.
  /// Right now, we support:
  /// - concat_vector(undef, undef) => undef
  /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
  ///   build_vector(A, B, C, D)
  ///
  /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
  /// can be flattened into a build_vector.
  /// In the first case, \p Ops will be empty.
  /// In the second case, \p Ops will contain the operands
  /// needed to produce the flattened build_vector.
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool matchCombineConcatVectors(MachineInstr &MI, SmallVector<Register> &Ops);
  /// Replace \p MI with a flattened build_vector with \p Ops
  /// or an implicit_def if \p Ops is empty.
  void applyCombineConcatVectors(MachineInstr &MI, SmallVector<Register> &Ops);


  bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector<Register> &Ops);
  /// Replace \p MI with a flattened build_vector with \p Ops
  /// or an implicit_def if \p Ops is empty.
  void applyCombineShuffleConcat(MachineInstr &MI, SmallVector<Register> &Ops);

  /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
  /// Returns true if MI changed.
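  ///
  /// For example (illustrative), a shuffle whose mask simply appends the two
  /// sources:
  /// \code
  ///   %r:_(<4 x s32>) = G_SHUFFLE_VECTOR %a(<2 x s32>), %b, shufflemask(0, 1, 2, 3)
  /// \endcode
  /// becomes
  /// \code
  ///   %r:_(<4 x s32>) = G_CONCAT_VECTORS %a(<2 x s32>), %b(<2 x s32>)
  /// \endcode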
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool tryCombineShuffleVector(MachineInstr &MI);
  /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
  /// concat_vectors.
  /// \p Ops will contain the operands needed to produce the flattened
  /// concat_vectors.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool matchCombineShuffleVector(MachineInstr &MI,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a concat_vectors with \p Ops.
  void applyCombineShuffleVector(MachineInstr &MI,
                                 const ArrayRef<Register> Ops);
  bool matchShuffleToExtract(MachineInstr &MI);
  void applyShuffleToExtract(MachineInstr &MI);

  /// Optimize memcpy intrinsics et al, e.g. constant len calls.
  /// \p MaxLen if non-zero specifies the max length of a mem libcall to
  /// inline.
  bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);

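  /// Fold (G_PTR_ADD (G_PTR_ADD base, C1), C2) -> (G_PTR_ADD base, C1 + C2),
  /// where C1 and C2 are constant offsets.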
  bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
  void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);

  /// Fold (shift (shift base, x), y) -> (shift base (x+y))
  bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
  void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);

  /// If we have a shift-by-constant of a bitwise logic op that itself has a
  /// shift-by-constant operand with identical opcode, we may be able to
  /// convert that into 2 independent shifts followed by the logic op.
  bool matchShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);
  void applyShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);

  bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform a multiply by a power-of-2 value to a left shift.
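  /// E.g. G_MUL x, 16 -> G_SHL x, 4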
  bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
  void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);

  // Transform a G_SHL with an extended source into a narrower shift if
  // possible.
  bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
  void applyCombineShlOfExtend(MachineInstr &MI,
                               const RegisterImmPair &MatchData);

  /// Fold away a merge of an unmerge of the corresponding values.
  bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo);

  /// Reduce a shift by a constant to an unmerge and a shift on a half sized
  /// type. This will not produce a shift smaller than \p TargetShiftSize.
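  ///
  /// E.g. (conceptually, for a 64-bit shift lowered onto 32-bit halves;
  /// constants are shown inline for brevity):
  /// \code
  ///   %r:_(s64) = G_LSHR %x:_(s64), 40
  /// \endcode
  /// becomes
  /// \code
  ///   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %x:_(s64)
  ///   %narrow:_(s32) = G_LSHR %hi, 8
  ///   %zero:_(s32) = G_CONSTANT i32 0
  ///   %r:_(s64) = G_MERGE_VALUES %narrow:_(s32), %zero:_(s32)
  /// \endcode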
  bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
                                  unsigned &ShiftVal);
  void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
  bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);

  /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
  bool
  matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);
  void
  applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);

  /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
  bool matchCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);
  void applyCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);

  /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
  bool
  matchCombineUnmergeUndef(MachineInstr &MI,
                           std::function<void(MachineIRBuilder &)> &MatchInfo);

  /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
  bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
  void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);

  /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
  bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
  void applyCombineUnmergeZExtToZExt(MachineInstr &MI);

  /// Transform fp_instr(cst) to constant result of the fp operation.
  void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst);

  /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
  bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
  void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);

  /// Transform PtrToInt(IntToPtr(x)) to x.
  void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);

  /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
  /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
  bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);
  void applyCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);

  // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
  bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);
  void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);

  /// Transform anyext(trunc(x)) to x.
  bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);

  /// Transform zext(trunc(x)) to x.
  bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg);

  /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
  bool matchCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  void applyCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
  bool matchCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);
  void applyCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);

  /// Transform trunc (shl x, K) to shl (trunc x), K
  ///    if K < VT.getScalarSizeInBits().
  ///
  /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
  ///    if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
  /// MidVT is obtained by finding a legal type between the trunc's src and dst
  /// types.
  bool matchCombineTruncOfShift(MachineInstr &MI,
                                std::pair<MachineInstr *, LLT> &MatchInfo);
  void applyCombineTruncOfShift(MachineInstr &MI,
                                std::pair<MachineInstr *, LLT> &MatchInfo);

  /// Return true if any explicit use operand on \p MI is defined by a
  /// G_IMPLICIT_DEF.
  bool matchAnyExplicitUseIsUndef(MachineInstr &MI);

  /// Return true if all register explicit use operands on \p MI are defined by
  /// a G_IMPLICIT_DEF.
  bool matchAllExplicitUsesAreUndef(MachineInstr &MI);

  /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
  bool matchUndefShuffleVectorMask(MachineInstr &MI);

  /// Return true if a G_STORE instruction \p MI is storing an undef value.
  bool matchUndefStore(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has an undef comparison.
  bool matchUndefSelectCmp(MachineInstr &MI);

  /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
  bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
  /// true, \p OpIdx will store the operand index of the known selected value.
  bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);

  /// Replace an instruction with a G_FCONSTANT with value \p C.
  void replaceInstWithFConstant(MachineInstr &MI, double C);

  /// Replace an instruction with a G_FCONSTANT with value \p CFP.
  void replaceInstWithFConstant(MachineInstr &MI, ConstantFP *CFP);

  /// Replace an instruction with a G_CONSTANT with value \p C.
  void replaceInstWithConstant(MachineInstr &MI, int64_t C);

  /// Replace an instruction with a G_CONSTANT with value \p C.
  void replaceInstWithConstant(MachineInstr &MI, APInt C);

  /// Replace an instruction with a G_IMPLICIT_DEF.
  void replaceInstWithUndef(MachineInstr &MI);

  /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
  void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);

  /// Delete \p MI and replace all of its uses with \p Replacement.
  void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);

  /// Replace the shift amount in \p MI with ShiftAmt % BW.
  void applyFunnelShiftConstantModulo(MachineInstr &MI);

  /// Return true if \p MOP1 and \p MOP2 are register operands that are defined
  /// by equivalent instructions.
  bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);

  /// Return true if \p MOP is defined by a G_CONSTANT or splat with a value
  /// equal to \p C.
  bool matchConstantOp(const MachineOperand &MOP, int64_t C);

  /// Return true if \p MOP is defined by a G_FCONSTANT or splat with a value
  /// exactly equal to \p C.
  bool matchConstantFPOp(const MachineOperand &MOP, double C);

  /// Check if the constant at \p ConstIdx is larger than \p MI's bitwidth.
  /// \param ConstIdx Index of the constant.
  bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx);

  /// Optimize (cond ? x : x) -> x
  bool matchSelectSameVal(MachineInstr &MI);

  /// Optimize (x op x) -> x
  bool matchBinOpSameVal(MachineInstr &MI);

  /// Check if operand \p OpIdx is zero.
  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is undef.
  bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is known to be a power of 2.
  bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);

  /// Erase \p MI
  void eraseInst(MachineInstr &MI);

  /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
  bool matchSimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);
  void applySimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);

  /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
  bool
  matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
                                       InstructionStepsMatchInfo &MatchInfo);

  /// Replace \p MI with a series of instructions described in \p MatchInfo.
  void applyBuildInstructionSteps(MachineInstr &MI,
                                  InstructionStepsMatchInfo &MatchInfo);

  /// Match ashr (shl x, C), C -> sext_inreg x, (BitWidth - C)
  bool matchAshrShlToSextInreg(MachineInstr &MI,
                               std::tuple<Register, int64_t> &MatchInfo);
  void applyAshShlToSextInreg(MachineInstr &MI,
                              std::tuple<Register, int64_t> &MatchInfo);

  /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
  bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// \return true if \p MI is a G_AND instruction whose operands are x and y
  /// where x & y == x or x & y == y. (E.g., one of the operands is an all-ones
  /// value.)
  ///
  /// \param [in] MI - The G_AND instruction.
  /// \param [out] Replacement - A register the G_AND should be replaced with on
  /// success.
  bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_OR instruction whose operands are x and y
  /// where x | y == x or x | y == y. (E.g., one of the operands is an all-zeros
  /// value.)
  ///
  /// \param [in] MI - The G_OR instruction.
  /// \param [out] Replacement - A register the G_OR should be replaced with on
  /// success.
  bool matchRedundantOr(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_SEXT_INREG that can be erased.
  bool matchRedundantSExtInReg(MachineInstr &MI);

  /// Combine inverting a result of a compare into the opposite cond code.
  bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
  void applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);

  /// Fold (xor (and x, y), y) -> (and (not x), y)
  ///{
  bool matchXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  void applyXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  ///}

  /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
  bool matchPtrAddZero(MachineInstr &MI);
  void applyPtrAddZero(MachineInstr &MI);

  /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
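  /// E.g. G_UREM x, 8 becomes G_AND x, 7.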
  void applySimplifyURemByPow2(MachineInstr &MI);

  /// Push a binary operator through a select on constants.
  ///
  /// binop (select cond, K0, K1), K2 ->
  ///   select cond, (binop K0, K2), (binop K1, K2)
  bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo);
  void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo);

  bool matchCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  void applyCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  /// Match expression trees of the form
  ///
  /// \code
  ///  sN *a = ...
  ///  sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
  /// \endcode
  ///
  /// And check if the tree can be replaced with an M-bit load + possibly a
  /// bswap.
  bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI);
  void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI);

  bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg);
  void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg);

  bool matchExtractAllEltsFromBuildVector(
      MachineInstr &MI,
      SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
  void applyExtractAllEltsFromBuildVector(
      MachineInstr &MI,
      SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);

  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// By default, it erases the instruction \p MI from the function.
  void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo);
  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// This variant does not erase \p MI after calling the build function.
  void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo);
  bool matchFunnelShiftToRotate(MachineInstr &MI);
  void applyFunnelShiftToRotate(MachineInstr &MI);
  bool matchRotateOutOfRange(MachineInstr &MI);
  void applyRotateOutOfRange(MachineInstr &MI);

  /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
  /// or false constant based on KnownBits information.
  bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo);

  /// \returns true if a G_ICMP \p MI can be replaced with its LHS based on
  /// KnownBits information.
  bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
  bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI,
                                         BuildFnTy &MatchInfo);
  /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
  bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
  bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match: shr (and x, n), k -> ubfx x, pos, width
  bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  // Helpers for reassociation:
  bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS,
                                    BuildFnTy &MatchInfo);
  bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS,
                                          MachineInstr *RHS,
                                          BuildFnTy &MatchInfo);
  bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS,
                                    MachineInstr *RHS, BuildFnTy &MatchInfo);
  /// Reassociate pointer calculations with G_ADD involved, to allow better
  /// addressing mode usage.
  bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Try to reassociate the operands of a commutative binop.
  bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
                       Register Op1, BuildFnTy &MatchInfo);
  /// Reassociate commutative binary operations like G_ADD.
  bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Do constant folding when opportunities are exposed after MIR building.
  bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo);

  /// Do constant folding when opportunities are exposed after MIR building.
  bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo);

  /// Do constant FP folding when opportunities are exposed after MIR building.
  bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo);

  /// Constant fold G_FMA/G_FMAD.
  bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo);

  /// \returns true if it is possible to narrow the width of a scalar binop
  /// feeding a G_AND instruction \p MI.
  bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Given a G_UDIV \p MI expressing a divide by constant, return an
  /// expression that implements it by multiplying by a magic number.
  /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
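  ///
  /// For instance (32-bit, illustrative; constants shown inline for brevity),
  /// x / 5 can be rewritten as
  /// \code
  ///   %hi:_(s32) = G_UMULH %x, 0xCCCCCCCD
  ///   %res:_(s32) = G_LSHR %hi, 2
  /// \endcode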
  MachineInstr *buildUDivUsingMul(MachineInstr &MI);
  /// Combine G_UDIV by constant into a multiply by magic constant.
  bool matchUDivByConst(MachineInstr &MI);
  void applyUDivByConst(MachineInstr &MI);

  /// Given a G_SDIV \p MI expressing a signed divide by constant, return an
  /// expression that implements it by multiplying by a magic number.
  /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
  MachineInstr *buildSDivUsingMul(MachineInstr &MI);
  bool matchSDivByConst(MachineInstr &MI);
  void applySDivByConst(MachineInstr &MI);

  /// Given a G_SDIV \p MI expressing a signed division by a power-of-2
  /// constant, return an expression that implements it by shifting.
  bool matchDivByPow2(MachineInstr &MI, bool IsSigned);
  void applySDivByPow2(MachineInstr &MI);
  /// Given a G_UDIV \p MI expressing an unsigned division by a power-of-2
  /// constant, return an expression that implements it by shifting.
  void applyUDivByPow2(MachineInstr &MI);

  // G_UMULH x, (1 << c) -> x >> (bitwidth - c)
  bool matchUMulHToLShr(MachineInstr &MI);
  void applyUMulHToLShr(MachineInstr &MI);

  /// Try to transform \p MI by using all of the above
  /// combine functions. Returns true if changed.
  bool tryCombine(MachineInstr &MI);

  /// Emit loads and stores that perform the given memcpy.
  /// Assumes \p MI is a G_MEMCPY_INLINE.
  /// TODO: implement dynamically sized inline memcpy,
  ///       and rename: s/bool tryEmit/void emit/
  bool tryEmitMemcpyInline(MachineInstr &MI);

  /// Match:
  ///   (G_UMULO x, 2) -> (G_UADDO x, x)
  ///   (G_SMULO x, 2) -> (G_SADDO x, x)
  bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match:
  /// (G_*MULO x, 0) -> 0 + no carry out
  bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match:
  /// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
  /// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
  bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fadd x, fneg(y)) -> (fsub x, y)
  ///           (fadd fneg(x), y) -> (fsub y, x)
  ///           (fsub x, fneg(y)) -> (fadd x, y)
  ///           (fmul fneg(x), fneg(y)) -> (fmul x, y)
  ///           (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
  ///           (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
  ///           (fma fneg(x), fneg(y), z) -> (fma x, y, z)
  bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo);
  void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo);

  bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
                           bool &HasFMAD, bool &Aggressive,
                           bool CanReassociate = false);

  /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
  ///           (fadd (fmul x, y), z) -> (fmad x, y, z)
  bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
  ///           (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
  bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                            BuildFnTy &MatchInfo);

  /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
  ///           (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
  bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI,
                                          BuildFnTy &MatchInfo);

  // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
  //            -> (fma x, y, (fma (fpext u), (fpext v), z))
  //           (fadd (fmad x, y, (fpext (fmul u, v))), z)
  //            -> (fmad x, y, (fmad (fpext u), (fpext v), z))
  bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI,
                                                      BuildFnTy &MatchInfo);

  /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
  ///           (fsub (fmul x, y), z) -> (fmad x, y, -z)
  bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  ///           (fsub (fneg (fmul x, y)), z) -> (fmad (fneg x), y, (fneg z))
  bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI,
                                           BuildFnTy &MatchInfo);

  /// Transform (fsub (fpext (fmul x, y)), z)
  ///           -> (fma (fpext x), (fpext y), (fneg z))
  ///           (fsub (fpext (fmul x, y)), z)
  ///           -> (fmad (fpext x), (fpext y), (fneg z))
  bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                            BuildFnTy &MatchInfo);

  /// Transform (fsub (fpext (fneg (fmul x, y))), z)
  ///           -> (fneg (fma (fpext x), (fpext y), z))
  ///           (fsub (fpext (fneg (fmul x, y))), z)
  ///           -> (fneg (fmad (fpext x), (fpext y), z))
  bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI,
                                                BuildFnTy &MatchInfo);

  bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info);

  /// Transform G_ADD(x, G_SUB(y, x)) to y.
  /// Transform G_ADD(G_SUB(y, x), x) to y.
  bool matchAddSubSameReg(MachineInstr &MI, Register &Src);

  bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo);
  bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo);
  bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo);

  /// Transform:
  ///   (x + y) - y -> x
  ///   (x + y) - x -> y
  ///   x - (y + x) -> 0 - y
  ///   x - (x + z) -> 0 - z
  bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// \returns true if it is possible to simplify a select instruction \p MI
  /// to a min/max instruction of some sort.
  bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform:
  ///   (X + Y) == X -> Y == 0
  ///   (X - Y) == X -> Y == 0
  ///   (X ^ Y) == X -> Y == 0
  ///   (X + Y) != X -> Y != 0
  ///   (X - Y) != X -> Y != 0
  ///   (X ^ Y) != X -> Y != 0
  bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match shifts greater than or equal to the bitwidth of the operation.
  bool matchShiftsTooBig(MachineInstr &MI);

  /// Match constant LHS ops that should be commuted.
  bool matchCommuteConstantToRHS(MachineInstr &MI);

  /// Combine sext of trunc.
  bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo);

  /// Combine zext of trunc.
  bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo);

  /// Combine zext nneg to sext.
  bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo);

  /// Match constant LHS FP ops that should be commuted.
  bool matchCommuteFPConstantToRHS(MachineInstr &MI);

  // Given a binop \p MI, commute operands 1 and 2.
  void applyCommuteBinOpOperands(MachineInstr &MI);

  /// Combine select to integer min/max.
  bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo);

  /// Combine selects.
  bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Combine ands.
  bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Combine ors.
  bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Combine addos.
  bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Combine extract vector element.
  bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Combine extract vector element with a build vector on the vector
  /// register.
  bool matchExtractVectorElementWithBuildVector(const MachineOperand &MO,
                                                BuildFnTy &MatchInfo);

  /// Combine extract vector element with a build vector trunc on the vector
  /// register.
  bool matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO,
                                                     BuildFnTy &MatchInfo);

  /// Combine extract vector element with a shuffle vector on the vector
  /// register.
  bool matchExtractVectorElementWithShuffleVector(const MachineOperand &MO,
                                                  BuildFnTy &MatchInfo);

  /// Combine extract vector element with an insert vector element on the
  /// vector register and different indices.
  bool matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO,
                                                     BuildFnTy &MatchInfo);
  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// By default, it erases the instruction def'd on \p MO from the function.
  void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo);

  /// Match FPOWI if it's safe to expand it into a series of multiplications.
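  /// E.g. G_FPOWI x, 3 can become fmul (fmul x, x), x.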
  bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent);

  /// Expands FPOWI into a series of multiplications and a division if the
  /// exponent is negative.
  void applyExpandFPowI(MachineInstr &MI, int64_t Exponent);

  /// Combine insert vector element OOB.
  bool matchInsertVectorElementOOB(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI,
                                             BuildFnTy &MatchInfo);

  bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);

  bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);

  bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);

  bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);

private:
  /// Checks for legality of an indexed variant of \p LdSt.
  bool isIndexedLoadStoreLegal(GLoadStore &LdSt) const;
  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a post-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPostIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
                              Register &Offset, bool &RematOffset);

  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a pre-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPreIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
                             Register &Offset);

  /// Helper function for matchLoadOrCombine. Searches for Registers
  /// which may have been produced by a load instruction + some arithmetic.
  ///
  /// \param [in] Root - The search root.
  ///
  /// \returns The Registers found during the search.
  std::optional<SmallVector<Register, 8>>
  findCandidatesForLoadOrCombine(const MachineInstr *Root) const;

  /// Helper function for matchLoadOrCombine.
  ///
  /// Checks if every register in \p RegsToVisit is defined by a load
  /// instruction + some arithmetic.
  ///
  /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
  /// at to the index of the load.
  /// \param [in] MemSizeInBits - The number of bits each load should produce.
  ///
  /// \returns On success, a 3-tuple containing the lowest-index load found,
  /// the lowest index, and the last load in the sequence.
  std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
  findLoadOffsetsForLoadOrCombine(
      SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
      const SmallVector<Register, 8> &RegsToVisit,
      const unsigned MemSizeInBits);

  /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
  /// a re-association of its operands would break an existing legal addressing
  /// mode that the address computation currently represents.
  bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd);

  /// Behavior when a floating point min/max is given one NaN and one
  /// non-NaN as input.
  enum class SelectPatternNaNBehaviour {
    NOT_APPLICABLE = 0, ///< NaN behavior not applicable.
    RETURNS_NAN,        ///< Given one NaN input, returns the NaN.
    RETURNS_OTHER,      ///< Given one NaN input, returns the non-NaN.
    RETURNS_ANY         ///< Given one NaN input, can return either (or both
                        ///< operands are known non-NaN).
  };

  /// \returns which of \p LHS and \p RHS would be the result of a non-equality
  /// floating point comparison where one of \p LHS and \p RHS may be NaN.
  ///
  /// If both \p LHS and \p RHS may be NaN, returns
  /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
  SelectPatternNaNBehaviour
  computeRetValAgainstNaN(Register LHS, Register RHS,
                          bool IsOrderedComparison) const;

  /// Determines the floating point min/max opcode which should be used for
  /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
  ///
  /// \returns 0 if this G_SELECT should not be combined to a floating point
  /// min or max. If it should be combined, returns one of
  ///
  /// * G_FMAXNUM
  /// * G_FMAXIMUM
  /// * G_FMINNUM
  /// * G_FMINIMUM
  ///
  /// Helper function for matchFPSelectToMinMax.
  unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
                                   SelectPatternNaNBehaviour VsNaNRetVal) const;

  /// Handle floating point cases for matchSimplifySelectToMinMax.
  ///
  /// E.g.
  ///
  /// select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
  /// select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
  bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
                             Register FalseVal, BuildFnTy &MatchInfo);

  /// Try to fold selects to logical operations.
  bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo);

  bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo);

986   bool isZeroOrZeroSplat(Register Src, bool AllowUndefs);
987   bool isConstantSplatVector(Register Src, int64_t SplatValue,
988                              bool AllowUndefs);
989   bool isConstantOrConstantVectorI(Register Src) const;
990 
991   std::optional<APInt> getConstantOrConstantSplatVector(Register Src);
992 
993   /// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
994   /// or   (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
995   /// into a single comparison using range-based reasoning.
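  /// E.g. (illustrative): (x >s 2) && (x <s 10) -> (x - 3) u< 7.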
  bool tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
                                      BuildFnTy &MatchInfo);

  // Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
  bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo);
};
} // namespace llvm

#endif