//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
class RISCVSubtarget;
struct RISCVRegisterInfo;
namespace RISCVISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  RET_FLAG,
  URET_FLAG,
  SRET_FLAG,
  MRET_FLAG,
  CALL,
  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #3 and #4) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #2, an XLenVT constant from the ISD::CondCode enum.
  /// The lhs and rhs are XLenVT integers. The true and false values can be
  /// integer or floating point.
  SELECT_CC,
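  // For example, (SELECT_CC a, b, SETLT, x, y) computes a < b ? x : y, with a
  // and b read as XLenVT values.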
  BR_CC,
  BuildPairF64,
  SplitF64,
  TAIL,
  // Multiply high for signed x unsigned operands.
  MULHSU,
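  // e.g. MULHSU a, b yields the upper XLEN bits of the 2*XLEN-bit product
  // (sext a) * (zext b), mirroring the mulhsu instruction.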
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time. These have undefined behavior for division
  // by 0 or overflow (divw) like their target-independent counterparts.
  DIVW,
  DIVUW,
  REMUW,
  // RV64IB rotates, directly matching the semantics of the named RISC-V
  // instructions.
  ROLW,
  RORW,
  // RV64IZbb bit counting instructions directly matching the semantics of the
  // named RISC-V instructions.
  CLZW,
  CTZW,
  // RV64IB/RV32IB funnel shifts, with the semantics of the named RISC-V
  // instructions. Operand order is rs1, rs3, rs2/shamt.
  FSR,
  FSL,
  // RV64IB funnel shifts, with the semantics of the named RISC-V instructions.
  // Operand order is rs1, rs3, rs2/shamt.
  FSRW,
  FSLW,
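  // A rough sketch of the funnel-shift semantics: for 0 < shamt < XLEN,
  // FSL(rs1, rs3, shamt) produces (rs1 << shamt) | (rs3 >> (XLEN - shamt)),
  // so the vacated bits are filled from rs3 rather than with zeros.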
  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
  // XLEN is the only legal integer width.
  //
  // FMV_H_X matches the semantics of the FMV.H.X instruction.
  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
  // FMV_W_X_RV64 matches the semantics of the FMV.W.X instruction.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  //
  // These are more convenient semantics for producing DAG combines that remove
  // unnecessary GPR->FPR->GPR moves.
  FMV_H_X,
  FMV_X_ANYEXTH,
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
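  // For example, a DAG combine can fold (FMV_X_ANYEXTW_RV64 (FMV_W_X_RV64 x))
  // back to x, eliminating the round trip through the FPR.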
  // FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and
  // fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out-of-range
  // inputs. These are used for FP_TO_S/UINT_SAT lowering. The rounding mode
  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
  FCVT_X,
  FCVT_XU,
  // FP to 32-bit int conversions for RV64. These are used to keep track of the
  // result being sign-extended to 64 bits. These saturate out-of-range inputs.
  // Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. The rounding mode
  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
  FCVT_W_RV64,
  FCVT_WU_RV64,
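  // As an illustration, an i32 fptosi of a float on RV64 can lower to
  // (FCVT_W_RV64 f, RTZ), selecting to fcvt.w.s with static rtz rounding.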
  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  READ_CYCLE_WIDE,
  // Generalized Reverse and Generalized Or-Combine - directly matching the
  // semantics of the named RISC-V instructions. Lowered as custom nodes as
  // TableGen chokes when faced with commutative permutations in deeply-nested
  // DAGs. Each node takes an input operand and a control operand and outputs a
  // bit-manipulated version of the input. All operands are i32 or XLenVT.
  GREV,
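  // e.g. (GREV x, 24) byte-swaps a 32-bit value (grevi's encoding of rev8 on
  // RV32), while (GREV x, 7) reverses the bits within each byte.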
  GREVW,
  GORC,
  GORCW,
  SHFL,
  SHFLW,
  UNSHFL,
  UNSHFLW,
  // Bit Compress/Decompress implement the generic bit extract and bit deposit
  // functions. This operation is also referred to as bit gather/scatter, bit
  // pack/unpack, parallel extract/deposit, compress/expand, or right
  // compress/right expand.
  BCOMPRESS,
  BCOMPRESSW,
  BDECOMPRESS,
  BDECOMPRESSW,
  // The bit field place (bfp) instruction places up to XLEN/2 LSB bits from
  // rs2 into the value in rs1. The upper bits of rs2 control the length of the
  // bit field and target position. The layout of rs2 is chosen in a way that
  // makes it possible to construct rs2 easily using pack[h] instructions
  // and/or andi/lui.
  BFP,
  BFPW,
  // Vector Extension
  // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
  // for the VL value to be used for the operation.
  VMV_V_X_VL,
  // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra
  // operand for the VL value to be used for the operation.
  VFMV_V_F_VL,
  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
  // extended from the vector element size.
  VMV_X_S,
  // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
  VMV_S_X_VL,
  // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
  VFMV_S_F_VL,
  // Splats an i64 scalar to a vector type (with element type i64) where the
  // scalar is a sign-extended i32.
  SPLAT_VECTOR_I64,
  // Splats a 64-bit value that has been split into two i32 parts. This is
  // expanded late to two scalar stores and a stride 0 vector load.
  SPLAT_VECTOR_SPLIT_I64_VL,
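  // A sketch of that expansion: the two i32 halves are stored to an 8-byte
  // stack slot and reloaded with a zero-stride vlse64, which broadcasts the
  // combined i64 to every element.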
  // Read VLENB CSR
  READ_VLENB,
  // Truncates an RVV integer vector by one power-of-two. Carries both an extra
  // mask and VL operand.
  TRUNCATE_VECTOR_VL,
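  // e.g. nxv4i32 = TRUNCATE_VECTOR_VL nxv4i64:src, mask, vl narrows each
  // element from i64 to i32.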
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, the third is the
  // XLenVT index (either constant or non-constant), the fourth is the mask
  // and the fifth the VL.
  VSLIDEUP_VL,
  VSLIDEDOWN_VL,
  // Matches the semantics of vslide1up/vslide1down. The first operand is the
  // source vector, the second is the XLenVT scalar value. The third and fourth
  // operands are the mask and VL operands.
  VSLIDE1UP_VL,
  VSLIDE1DOWN_VL,
  // Matches the semantics of the vid.v instruction, with a mask and VL
  // operand.
  VID_VL,
  // Matches the semantics of the vfncvt.rod instruction (convert double-width
  // float to single-width float, rounding towards odd). Takes a double-width
  // float vector and produces a single-width float vector. Also has a mask and
  // VL operand.
  VFNCVT_ROD_VL,
  // These nodes match the semantics of the corresponding RVV vector reduction
  // instructions. They produce a vector result which is the reduction
  // performed over the second vector operand plus the first element of the
  // third vector operand. The first operand is the pass-thru operand. The
  // second operand is an unconstrained vector type, and the result, first, and
  // third operand's types are expected to be the corresponding full-width
  // LMUL=1 type for the second operand:
  //   nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8
  //   nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32
  // The difference in types does introduce extra vsetvli instructions, but it
  // also reduces the number of registers consumed per reduction.
  // Also has a mask and VL operand.
  VECREDUCE_ADD_VL,
  VECREDUCE_UMAX_VL,
  VECREDUCE_SMAX_VL,
  VECREDUCE_UMIN_VL,
  VECREDUCE_SMIN_VL,
  VECREDUCE_AND_VL,
  VECREDUCE_OR_VL,
  VECREDUCE_XOR_VL,
  VECREDUCE_FADD_VL,
  VECREDUCE_SEQ_FADD_VL,
  VECREDUCE_FMIN_VL,
  VECREDUCE_FMAX_VL,

  // Vector binary and unary ops with a mask as a third operand, and VL as a
  // fourth operand.
  // FIXME: Can we replace these with ISD::VP_*?
  ADD_VL,
  AND_VL,
  MUL_VL,
  OR_VL,
  SDIV_VL,
  SHL_VL,
  SREM_VL,
  SRA_VL,
  SRL_VL,
  SUB_VL,
  UDIV_VL,
  UREM_VL,
  XOR_VL,

  SADDSAT_VL,
  UADDSAT_VL,
  SSUBSAT_VL,
  USUBSAT_VL,

  FADD_VL,
  FSUB_VL,
  FMUL_VL,
  FDIV_VL,
  FNEG_VL,
  FABS_VL,
  FSQRT_VL,
  FMA_VL,
  FCOPYSIGN_VL,
  SMIN_VL,
  SMAX_VL,
  UMIN_VL,
  UMAX_VL,
  FMINNUM_VL,
  FMAXNUM_VL,
  MULHS_VL,
  MULHU_VL,
  FP_TO_SINT_VL,
  FP_TO_UINT_VL,
  SINT_TO_FP_VL,
  UINT_TO_FP_VL,
  FP_ROUND_VL,
  FP_EXTEND_VL,

  // Widening instructions
  VWMUL_VL,
  VWMULU_VL,
  VWMULSU_VL,
  VWADDU_VL,

  // Vector compare producing a mask. Fourth operand is input mask. Fifth
  // operand is VL.
  SETCC_VL,

  // Vector select with an additional VL operand. This operation is unmasked.
  VSELECT_VL,
  // Vector select with operand #2 (the value when the condition is false) tied
  // to the destination and an additional VL operand. This operation is
  // unmasked.
  VP_MERGE_VL,

  // Mask binary operators.
  VMAND_VL,
  VMOR_VL,
  VMXOR_VL,

  // Set mask vector to all zeros or ones.
  VMCLR_VL,
  VMSET_VL,

  // Matches the semantics of vrgather.vx and vrgather.vv with an extra operand
  // for VL.
  VRGATHER_VX_VL,
  VRGATHER_VV_VL,
  VRGATHEREI16_VV_VL,

  // Vector sign/zero extend with additional mask & VL operands.
  VSEXT_VL,
  VZEXT_VL,

  // vcpop.m with additional mask and VL operands.
  VCPOP_VL,

  // Reads value of CSR.
  // The first operand is a chain pointer. The second specifies the address of
  // the required CSR. Two results are produced: the read value and the new
  // chain pointer.
  READ_CSR,
  // Write value to CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. The result is the
  // new chain pointer.
  WRITE_CSR,
  // Read and write value of CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. Two results are
  // produced: the value read before the modification and the new chain
  // pointer.
  SWAP_CSR,
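  // For example, lowerGET_ROUNDING below can be implemented as
  // (rm, chain) = READ_CSR chain, frm, and lowerSET_ROUNDING as the matching
  // WRITE_CSR of the frm CSR.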

  // FP to 32-bit int conversions for RV64. These are used to keep track of the
  // result being sign-extended to 64 bits. These saturate out-of-range inputs.
  STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCVT_WU_RV64,

  // Memory opcodes start here.
  VLE_VL = ISD::FIRST_TARGET_MEMORY_OPCODE,
  VSE_VL,

  // WARNING: Do not add anything after this point unless you want the node to
  // have a memop! Every opcode from FIRST_TARGET_MEMORY_OPCODE onwards is
  // treated as a target memory op!
};
} // namespace RISCVISD

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  const RISCVSubtarget &getSubtarget() const { return Subtarget; }

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;
  bool hasAndNotCompare(SDValue Y) const override;
  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  bool softPromoteHalfType() const override { return true; }

  /// Return the register type for a given MVT, ensuring vectors are treated
  /// as a series of GPR-sized integers.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;

  /// Return the number of registers for a given MVT, ensuring vectors are
  /// treated as a series of GPR-sized integers.
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  // This method returns the name of a target specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    return !DAG.getMachineFunction().getFunction().hasMinSize();
  }
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;
  bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  // Lower incoming arguments, copy physregs into vregs
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override {
    return true;
  }
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  bool isMulAddWithConstProfitable(const SDValue &AddNode,
                                   const SDValue &ConstNode) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;

  bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
                                   SDValue Val, SDValue *Parts,
                                   unsigned NumParts, MVT PartVT,
                                   Optional<CallingConv::ID> CC) const override;

  SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             Optional<CallingConv::ID> CC) const override;

  static RISCVII::VLMUL getLMUL(MVT VT);
  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
  static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
  static unsigned getRegClassIDForVecVT(MVT VT);
  static std::pair<unsigned, unsigned>
  decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
                                           unsigned InsertExtractIdx,
                                           const RISCVRegisterInfo *TRI);
  MVT getContainerForFixedLengthVector(MVT VT) const;
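  // e.g., assuming a minimum VLEN of 128, a fixed-length v4i32 would be
  // operated on inside an nxv2i32 container.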

  bool shouldRemoveExtendFromGSIndex(EVT VT) const override;

  bool isLegalElementTypeForRVV(Type *ScalarTy) const;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  unsigned getJumpTableEncoding() const override;

  const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                          const MachineBasicBlock *MBB,
                                          unsigned uid,
                                          MCContext &Ctx) const override;

private:
  /// RISCVCCAssignFn - This target-specific function extends the default
  /// CCValAssign with additional information used to lower RISC-V calling
  /// conventions.
  typedef bool RISCVCCAssignFn(const DataLayout &DL, RISCVABI::ABI,
                               unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
                               ISD::ArgFlagsTy ArgFlags, CCState &State,
                               bool IsFixed, bool IsRet, Type *OrigTy,
                               const RISCVTargetLowering &TLI,
                               Optional<unsigned> FirstMaskArgument);
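  /// For reference, CC_RISCV and CC_RISCV_FastCC in RISCVISelLowering.cpp
  /// implement this signature and are passed to the analyze* helpers below.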

  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                        RISCVCCAssignFn Fn) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI,
                         RISCVCCAssignFn Fn) const;

  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTrunc(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskVecReduction(SDValue Op, SelectionDAG &DAG,
                                      bool IsVP) const;
  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned MaskOpc,
                                             unsigned VecOpc) const;
  SDValue lowerFixedLengthVectorShiftToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
                            bool HasMask = true) const;
  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc) const;
  SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc,
                         unsigned VecOpc) const;
  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                            unsigned ExtendOpc) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;

  bool useRVVForFixedLengthVectorVT(MVT VT) const;

  MVT getVPExplicitVectorLengthTy() const override;

  /// RVV code generation for fixed length vectors does not lower all
  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
  /// this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  /// Disable normalizing
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)).
  /// RISC-V doesn't have condition flags, so it's better to perform the and/or
  /// in a GPR.
  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
    return false;
  }
};

namespace RISCV {
// We use 64 bits as the known part in the scalable vector types.
static constexpr unsigned RVVBitsPerBlock = 64;
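// vscale therefore corresponds to VLEN / RVVBitsPerBlock; e.g. an nxv2i32
// value holds (VLEN / 64) * 2 i32 elements, so 4 elements when VLEN is 128.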
} // namespace RISCV

namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID;
  uint8_t SplatOperand;
  uint8_t VLOperand;
  bool hasSplatOperand() const {
    // 0xF is not valid. See NoSplatOperand in IntrinsicsRISCV.td.
    return SplatOperand != 0xF;
  }
  bool hasVLOperand() const {
    // 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td.
    return VLOperand != 0x1F;
  }
};

using namespace RISCV;

#define GET_RISCVVIntrinsicsTable_DECL
#include "RISCVGenSearchableTables.inc"
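// The generated declarations provide the table lookup used by the lowering
// code; with the usual searchable-table naming this is
//   const RISCVVIntrinsicInfo *getRISCVVIntrinsicInfo(unsigned IntrinsicID);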

} // end namespace RISCVVIntrinsicsTable

} // end namespace llvm

#endif