//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
class RISCVSubtarget;
struct RISCVRegisterInfo;
namespace RISCVISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  RET_FLAG,
  URET_FLAG,
  SRET_FLAG,
  MRET_FLAG,
  CALL,
  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #3 and #4) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #2, an XLenVT constant from the ISD::CondCode enum.
  /// The lhs and rhs are XLenVT integers. The true and false values can be
  /// integer or floating point.
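  /// An illustrative sketch of the operand layout described above (operand
  /// names are descriptive only):
  ///   (SELECT_CC lhs, rhs, setlt, truev, falsev)
  /// yields truev when (lhs slt rhs) and falsev otherwise.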
  SELECT_CC,
  BR_CC,
  BuildPairF64,
  SplitF64,
  TAIL,

  // Add the Lo 12 bits from an address. Selected to ADDI.
  ADD_LO,
  // Get the Hi 20 bits from an address. Selected to LUI.
  HI,

  // Represents an AUIPC+ADDI pair. Selected to PseudoLLA.
  LLA,

  // Selected as PseudoAddTPRel. Used to emit a TP-relative relocation.
  ADD_TPREL,

  // Load the address of a TLS symbol (global-dynamic model).
  LA_TLS_GD,

  // Multiply high for signed x unsigned.
  MULHSU,
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time. These have undefined behavior for division
  // by 0 or overflow (divw), like their target-independent counterparts.
  DIVW,
  DIVUW,
  REMUW,
  // RV64IB rotates, directly matching the semantics of the named RISC-V
  // instructions.
  ROLW,
  RORW,
  // RV64IZbb bit counting instructions, directly matching the semantics of
  // the named RISC-V instructions.
  CLZW,
  CTZW,
  // RV64IB/RV32IB funnel shifts, with the semantics of the named RISC-V
  // instructions. Operand order is rs1, rs3, rs2/shamt.
  FSR,
  FSL,
  // RV64IB funnel shifts, with the semantics of the named RISC-V instructions.
  // Operand order is rs1, rs3, rs2/shamt.
  FSRW,
  FSLW,
  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
  // XLEN is the only legal integer width.
  //
  // FMV_H_X matches the semantics of the FMV.H.X instruction.
  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
  // FMV_X_SIGNEXTH is similar to FMV.X.H and has a sign-extended result.
  // FMV_W_X_RV64 matches the semantics of the FMV.W.X instruction.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  //
  // These are more convenient semantics for producing DAG combines that
  // remove unnecessary GPR->FPR->GPR moves.
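  // For instance (an illustrative sketch of such a combine): the low 16 bits
  // of (FMV_X_ANYEXTH (FMV_H_X x)) equal the low 16 bits of x, and the upper
  // bits of the any-extended result are unspecified, so the pair can be
  // folded to x.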
  FMV_H_X,
  FMV_X_ANYEXTH,
  FMV_X_SIGNEXTH,
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
  // FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and
  // fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT, these saturate out-of-range
  // inputs. These are used for FP_TO_S/UINT_SAT lowering. The rounding mode
  // is passed as a TargetConstant operand using the RISCVFPRndMode enum.
  FCVT_X,
  FCVT_XU,
  // FP to 32-bit int conversions for RV64. These are used to keep track of
  // the result being sign-extended to 64 bits. These saturate out-of-range
  // inputs. Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering. The rounding
  // mode is passed as a TargetConstant operand using the RISCVFPRndMode enum.
  FCVT_W_RV64,
  FCVT_WU_RV64,
  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  READ_CYCLE_WIDE,
  // Generalized Reverse and Generalized Or-Combine - directly matching the
  // semantics of the named RISC-V instructions. Lowered as custom nodes as
  // TableGen chokes when faced with commutative permutations in deeply-nested
  // DAGs. Each node takes an input operand and a control operand and outputs
  // a bit-manipulated version of the input. All operands are i32 or XLenVT.
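  // As an illustrative example (assuming the standard grev control encoding,
  // where each set control bit swaps adjacent blocks of the corresponding
  // size): on RV32, (GREV x, 24) swaps the 8-bit and 16-bit blocks of x,
  // i.e. it byte-swaps x, matching the rev8 encoding.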
  GREV,
  GREVW,
  GORC,
  GORCW,
  SHFL,
  SHFLW,
  UNSHFL,
  UNSHFLW,
  // Bit Compress/Decompress implement the generic bit extract and bit deposit
  // functions. This operation is also referred to as bit gather/scatter, bit
  // pack/unpack, parallel extract/deposit, compress/expand, or right
  // compress/right expand.
  BCOMPRESS,
  BCOMPRESSW,
  BDECOMPRESS,
  BDECOMPRESSW,
  // The bit field place (bfp) instruction places up to XLEN/2 LSB bits from rs2
  // into the value in rs1. The upper bits of rs2 control the length of the bit
  // field and target position. The layout of rs2 is chosen in a way that makes
  // it possible to construct rs2 easily using pack[h] instructions and/or
  // andi/lui.
  BFP,
  BFPW,
  // Vector Extension
  // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
  // for the VL value to be used for the operation. The first operand is the
  // passthru operand.
  VMV_V_X_VL,
  // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra
  // operand for the VL value to be used for the operation. The first operand
  // is the passthru operand.
  VFMV_V_F_VL,
  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT
  // sign-extended from the vector element size.
  VMV_X_S,
  // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
  VMV_S_X_VL,
  // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
  VFMV_S_F_VL,
  // Splats a 64-bit value that has been split into two i32 parts. This is
  // expanded late to two scalar stores and a stride-0 vector load.
  // The first operand is the passthru operand.
  SPLAT_VECTOR_SPLIT_I64_VL,
  // Read the VLENB CSR.
  READ_VLENB,
  // Truncates an RVV integer vector by one power-of-two. Carries both an
  // extra mask and VL operand.
  TRUNCATE_VECTOR_VL,
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, the third is the
  // XLenVT index (either constant or non-constant), the fourth is the mask
  // and the fifth the VL.
  VSLIDEUP_VL,
  VSLIDEDOWN_VL,
  // Matches the semantics of vslide1up/vslide1down. The first operand is the
  // passthru operand, the second is the source vector, and the third is the
  // XLenVT scalar value. The fourth and fifth operands are the mask and VL
  // operands.
  VSLIDE1UP_VL,
  VSLIDE1DOWN_VL,
  // Matches the semantics of the vid.v instruction, with a mask and VL
  // operand.
  VID_VL,
  // Matches the semantics of the vfncvt.rod instruction (Convert double-width
  // float to single-width float, rounding towards odd). Takes a double-width
  // float vector and produces a single-width float vector. Also has a mask and
  // VL operand.
  VFNCVT_ROD_VL,
  // These nodes match the semantics of the corresponding RVV vector reduction
  // instructions. They produce a vector result which is the reduction
  // performed over the second vector operand plus the first element of the
  // third vector operand. The first operand is the pass-thru operand. The
  // second operand is an unconstrained vector type, and the result, first, and
  // third operand's types are expected to be the corresponding full-width
  // LMUL=1 type for the second operand:
  //   nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8
  //   nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32
  // The difference in types does introduce extra vsetvli instructions, but it
  // also reduces the number of registers consumed per reduction. Also has a
  // mask and VL operand.
  VECREDUCE_ADD_VL,
  VECREDUCE_UMAX_VL,
  VECREDUCE_SMAX_VL,
  VECREDUCE_UMIN_VL,
  VECREDUCE_SMIN_VL,
  VECREDUCE_AND_VL,
  VECREDUCE_OR_VL,
  VECREDUCE_XOR_VL,
  VECREDUCE_FADD_VL,
  VECREDUCE_SEQ_FADD_VL,
  VECREDUCE_FMIN_VL,
  VECREDUCE_FMAX_VL,

  // Vector binary and unary ops with a mask as a third operand, and VL as a
  // fourth operand.
  // FIXME: Can we replace these with ISD::VP_*?
  ADD_VL,
  AND_VL,
  MUL_VL,
  OR_VL,
  SDIV_VL,
  SHL_VL,
  SREM_VL,
  SRA_VL,
  SRL_VL,
  SUB_VL,
  UDIV_VL,
  UREM_VL,
  XOR_VL,

  SADDSAT_VL,
  UADDSAT_VL,
  SSUBSAT_VL,
  USUBSAT_VL,

  FADD_VL,
  FSUB_VL,
  FMUL_VL,
  FDIV_VL,
  FNEG_VL,
  FABS_VL,
  FSQRT_VL,
  VFMADD_VL,
  VFNMADD_VL,
  VFMSUB_VL,
  VFNMSUB_VL,
  FCOPYSIGN_VL,
  SMIN_VL,
  SMAX_VL,
  UMIN_VL,
  UMAX_VL,
  FMINNUM_VL,
  FMAXNUM_VL,
  MULHS_VL,
  MULHU_VL,
  FP_TO_SINT_VL,
  FP_TO_UINT_VL,
  SINT_TO_FP_VL,
  UINT_TO_FP_VL,
  FP_ROUND_VL,
  FP_EXTEND_VL,

  // Widening instructions
  VWMUL_VL,
  VWMULU_VL,
  VWMULSU_VL,
  VWADD_VL,
  VWADDU_VL,
  VWSUB_VL,
  VWSUBU_VL,
  VWADD_W_VL,
  VWADDU_W_VL,
  VWSUB_W_VL,
  VWSUBU_W_VL,

  // Vector compare producing a mask. Fourth operand is input mask. Fifth
  // operand is VL.
  SETCC_VL,

  // Vector select with an additional VL operand. This operation is unmasked.
  VSELECT_VL,
  // Vector select with operand #2 (the value when the condition is false) tied
  // to the destination and an additional VL operand. This operation is
  // unmasked.
  VP_MERGE_VL,

  // Mask binary operators.
  VMAND_VL,
  VMOR_VL,
  VMXOR_VL,

  // Set mask vector to all zeros or ones.
  VMCLR_VL,
  VMSET_VL,

  // Matches the semantics of vrgather.vx and vrgather.vv with extra operands
  // for passthru and VL. Operands are (src, index, mask, passthru, vl).
  VRGATHER_VX_VL,
  VRGATHER_VV_VL,
  VRGATHEREI16_VV_VL,

  // Vector sign/zero extend with additional mask & VL operands.
  VSEXT_VL,
  VZEXT_VL,

  // vcpop.m with additional mask and VL operands.
  VCPOP_VL,

  // Reads value of CSR.
  // The first operand is a chain pointer. The second specifies the address of
  // the required CSR. Two results are produced: the read value and the new
  // chain pointer.
  READ_CSR,
  // Write value to CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. The result is the
  // new chain pointer.
  WRITE_CSR,
  // Read and write value of CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. Two results are
  // produced: the value read before the modification and the new chain
  // pointer.
  SWAP_CSR,
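
  // Illustrative node shapes for the three CSR operations above (a sketch;
  // the names are descriptive only):
  //   {value, chain'}     = READ_CSR  chain, csr_addr
  //   {chain'}            = WRITE_CSR chain, csr_addr, value
  //   {old_value, chain'} = SWAP_CSR  chain, csr_addr, value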

  // FP to 32-bit int conversions for RV64. These are used to keep track of
  // the result being sign-extended to 64 bits. These saturate out-of-range
  // inputs.
  STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCVT_WU_RV64,

  // WARNING: Do not add anything at the end unless you want the node to
  // have a memop! Starting from FIRST_TARGET_MEMORY_OPCODE, all opcodes
  // are treated as target memory ops!

  // Load address. Placed after FIRST_TARGET_MEMORY_OPCODE because these
  // nodes carry a memop.
  LA = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LA_TLS_IE,
};
} // namespace RISCVISD

namespace RISCV {
// We use 64 bits as the known part in the scalable vector types.
static constexpr unsigned RVVBitsPerBlock = 64;
} // namespace RISCV
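
// An illustrative consequence of the 64-bit block size above: a scalable type
// such as nxv2i32 has a known minimum size of 2 x 32 = 64 bits, exactly one
// block, and therefore maps to LMUL=1 (a single vector register).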

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  const RISCVSubtarget &getSubtarget() const { return Subtarget; }

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool signExtendConstant(const ConstantInt *CI) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;
  bool hasAndNotCompare(SDValue Y) const override;
  bool hasBitTest(SDValue X, SDValue Y) const override;
  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const override;
  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  bool shouldScalarizeBinop(SDValue VecOp) const override;
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  bool softPromoteHalfType() const override { return true; }

  /// Return the register type for a given MVT, ensuring vectors are treated
  /// as a series of GPR-sized integers.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;

  /// Return the number of registers for a given MVT, ensuring vectors are
  /// treated as a series of GPR-sized integers.
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const override;

  // This method returns the name of a target-specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return false;
    return true;
  }
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;
  bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  // Lower incoming arguments, copy physregs into vregs.
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  bool isMulAddWithConstProfitable(SDValue AddNode,
                                   SDValue ConstNode) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;

  bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
                                   SDValue Val, SDValue *Parts,
                                   unsigned NumParts, MVT PartVT,
                                   Optional<CallingConv::ID> CC) const override;

  SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             Optional<CallingConv::ID> CC) const override;

  static RISCVII::VLMUL getLMUL(MVT VT);
  inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,
                                      unsigned MinSize) {
    // Original equation:
    //   VLMAX = (VectorBits / EltSize) * LMUL
    //   where LMUL = MinSize / RISCV::RVVBitsPerBlock
    // The following equation has been reordered to prevent loss of precision
    // when calculating fractional LMUL.
    return ((VectorBits / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
  }
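  // Worked instance of the reordered equation above (illustrative numbers):
  // with VectorBits = 128, EltSize = 32 and MinSize = 32, LMUL = 32/64 = 1/2
  // and VLMAX = ((128 / 32) * 32) / 64 = 2.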
  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
  static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
  static unsigned getRegClassIDForVecVT(MVT VT);
  static std::pair<unsigned, unsigned>
  decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
                                           unsigned InsertExtractIdx,
                                           const RISCVRegisterInfo *TRI);
  MVT getContainerForFixedLengthVector(MVT VT) const;

  bool shouldRemoveExtendFromGSIndex(EVT IndexVT, EVT DataVT) const override;

  bool isLegalElementTypeForRVV(Type *ScalarTy) const;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;

  unsigned getJumpTableEncoding() const override;

  const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                          const MachineBasicBlock *MBB,
                                          unsigned uid,
                                          MCContext &Ctx) const override;

  bool isVScaleKnownToBeAPowerOfTwo() const override;

private:
  /// RISCVCCAssignFn - This target-specific function extends the default
  /// CCValAssign with additional information used to lower RISC-V calling
  /// conventions.
  typedef bool RISCVCCAssignFn(const DataLayout &DL, RISCVABI::ABI,
                               unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
                               ISD::ArgFlagsTy ArgFlags, CCState &State,
                               bool IsFixed, bool IsRet, Type *OrigTy,
                               const RISCVTargetLowering &TLI,
                               Optional<unsigned> FirstMaskArgument);
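  // A hedged usage note: the lowering code passes a function with this
  // signature (for example, a CC_RISCV-style assigner defined in the
  // corresponding .cpp file) as the Fn argument of analyzeInputArgs and
  // analyzeOutputArgs below.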

  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                        RISCVCCAssignFn Fn) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI,
                         RISCVCCAssignFn Fn) const;

  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;
  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTruncLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorTruncLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorFPExtendOrRoundLike(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskVecReduction(SDValue Op, SelectionDAG &DAG,
                                      bool IsVP) const;
  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SPLICE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned MaskOpc,
                                             unsigned VecOpc) const;
  SDValue lowerFixedLengthVectorShiftToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
                            bool HasMask = true) const;
  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc) const;
  SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc,
                         unsigned VecOpc) const;
  SDValue lowerVPExtMaskOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
                             unsigned RISCVISDOpc) const;
  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                            unsigned ExtendOpc) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;

  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;

  bool useRVVForFixedLengthVectorVT(MVT VT) const;

  MVT getVPExplicitVectorLengthTy() const override;

  /// RVV code generation for fixed length vectors does not lower all
  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
  /// this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  /// Disable normalizing
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)).
  /// RISCV doesn't have condition flags, so it's better to perform the and/or
  /// in a GPR.
  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
    return false;
  }
};
namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID;
  uint8_t ScalarOperand;
  uint8_t VLOperand;
  bool hasScalarOperand() const {
    // 0xF is not valid. See NoScalarOperand in IntrinsicsRISCV.td.
    return ScalarOperand != 0xF;
  }
  bool hasVLOperand() const {
    // 0x1F is not valid. See NoVLOperand in IntrinsicsRISCV.td.
    return VLOperand != 0x1F;
  }
};

using namespace RISCV;

#define GET_RISCVVIntrinsicsTable_DECL
#include "RISCVGenSearchableTables.inc"
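
// A hedged usage sketch (it assumes the lookup helper declared by the
// generated searchable table above; the local names are illustrative):
//   if (const RISCVVIntrinsicInfo *II = getRISCVVIntrinsicInfo(IntrinsicID))
//     if (II->hasVLOperand())
//       ; // the VL operand of the intrinsic call sits at index II->VLOperand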

} // end namespace RISCVVIntrinsicsTable

} // end namespace llvm

#endif