//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"

namespace llvm {
namespace RISCV {
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm

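// Expand certain custom ISD nodes before instruction selection. At this
// point the only node handled is RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL; see
// the comments in the loop below.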
void RISCVDAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
    // load. Done after lowering and combining so that we have a chance to
    // optimize this to VMV_V_X_VL when the upper bits aren't needed.
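    // Roughly, for a scalable i64 splat built from two i32 halves Lo/Hi the
    // expansion below behaves like (illustrative only):
    //   sw Lo, 0(slot); sw Hi, 4(slot); vlse64.v vd, (slot), zero
    // i.e. both halves are spilled to a stack slot and reloaded as a single
    // 64-bit element with a stride-0 vector load that broadcasts it.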
    if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
      continue;

    assert(N->getNumOperands() == 3 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
    SDValue Lo = N->getOperand(0);
    SDValue Hi = N->getOperand(1);
    SDValue VL = N->getOperand(2);
    assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
           "Unexpected VTs!");
    MachineFunction &MF = CurDAG->getMachineFunction();
    RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
    SDLoc DL(N);

    // We use the same frame index we use for moving two i32s into a 64-bit
    // FPR. This is an analogous operation.
    int FI = FuncInfo->getMoveF64FrameIndex(MF);
    MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    SDValue StackSlot =
        CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

    SDValue Chain = CurDAG->getEntryNode();
    Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

    SDValue OffsetSlot =
        CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
    Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                          Align(8));

    Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

    SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
    SDValue IntID =
        CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
    SDValue Ops[] = {Chain, IntID, StackSlot,
                     CurDAG->getRegister(RISCV::X0, MVT::i64), VL};

    SDValue Result = CurDAG->getMemIntrinsicNode(
        ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
        MachineMemOperand::MOLoad);

    // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
    // vlse we created. This will cause general havoc on the DAG because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  doPeepholeLoadStoreADDI();
}

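// Materialize the immediate Imm by emitting the LUI/ADDI(W)/SLLI/ADDUW
// sequence computed by RISCVMatInt::generateInstSeq, chaining each
// instruction's result into the next instruction's source register.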
static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
                         const RISCVSubtarget &Subtarget) {
  MVT XLenVT = Subtarget.getXLenVT();
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());

  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
    if (Inst.Opc == RISCV::LUI)
      Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
    else if (Inst.Opc == RISCV::ADDUW)
      Result = CurDAG->getMachineNode(RISCV::ADDUW, DL, XLenVT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, XLenVT));
    else
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}

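// Build a REG_SEQUENCE machine node that groups NF vector registers (NF
// between 2 and 8) into the segment tuple register class RegClassID,
// assigning the registers to consecutive sub-register indices starting at
// SubReg0. The createM1/M2/M4Tuple wrappers below pick the register class
// appropriate for each LMUL.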
static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                               unsigned RegClassID, unsigned SubReg0) {
  assert(Regs.size() >= 2 && Regs.size() <= 8);

  SDLoc DL(Regs[0]);
  SmallVector<SDValue, 8> Ops;

  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));

  for (unsigned I = 0; I < Regs.size(); ++I) {
    Ops.push_back(Regs[I]);
    Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
  }
  SDNode *N =
      CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  static const unsigned RegClassIDs[] = {
      RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
      RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
      RISCV::VRN8M1RegClassID};

  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
}

static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                         RISCV::VRN3M2RegClassID,
                                         RISCV::VRN4M2RegClassID};

  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
}

static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
                         RISCV::sub_vrm4_0);
}

static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                           unsigned NF, RISCVII::VLMUL LMUL) {
  switch (LMUL) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    return createM1Tuple(CurDAG, Regs, NF);
  case RISCVII::VLMUL::LMUL_2:
    return createM2Tuple(CurDAG, Regs, NF);
  case RISCVII::VLMUL::LMUL_4:
    return createM4Tuple(CurDAG, Regs, NF);
  }
}

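// Append the operand list shared by the vector load/store pseudos, starting
// at operand index CurOp of Node: base pointer, an optional stride/index
// operand, an optional mask (which is first copied into V0), the VL operand,
// the SEW immediate, and finally the chain plus the glue created by the V0
// copy. If IndexVT is non-null it receives the type of the index operand.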
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}

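// Select a unit-stride or strided segment load intrinsic to the matching
// VLSEG pseudo. The pseudo returns all NF registers as one tuple, so each
// result value of the intrinsic is replaced with an extract of the
// corresponding sub-register.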
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

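// Select a fault-only-first segment load. In addition to the NF result
// registers this variant also produces the updated VL, which is read back
// with a PseudoReadVL glued to the load and used to replace the intrinsic's
// VL result.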
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               MVT::Other, MVT::Glue, Operands);
  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                          /*Glue*/ SDValue(Load, 2));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
  CurDAG->RemoveDeadNode(Node);
}

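// Select an indexed segment load intrinsic (ordered or unordered, per
// IsOrdered). The index operand carries its own EEW and LMUL, which are
// looked up separately when picking the VLXSEG pseudo.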
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

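// Select a unit-stride or strided segment store intrinsic. The NF source
// registers are packed into a tuple with createTuple and passed as the
// store's data operand; NF is recovered from the operand count.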
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

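// Select an indexed segment store intrinsic (ordered or unordered, per
// IsOrdered). As with the indexed loads, the index operand's EEW and LMUL
// are looked up independently of the data type.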
void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}


void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  MVT VT = Node->getSimpleValueType(0);

  switch (Opcode) {
  case ISD::Constant: {
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isNullValue()) {
      SDValue New =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    ReplaceNode(Node,
                selectImm(CurDAG, DL, ConstNode->getSExtValue(), *Subtarget));
    return;
  }
  case ISD::FrameIndex: {
    SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
    ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
    return;
  }
  case ISD::SRL: {
    // We don't need this transform if zext.h is supported.
    if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
      break;
    // Optimize (srl (and X, 0xffff), C) ->
    //          (srli (slli X, (XLen-16)), (XLen-16) + C)
    // Taking into account that the 0xffff may have had lower bits unset by
    // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
    // This pattern occurs when type legalizing i16 right shifts.
    // FIXME: This could be extended to other AND masks.
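    // For example, on RV64 with C == 4 this rewrites
    //   (srl (and X, 0xffff), 4) -> (srli (slli X, 48), 52),
    // which extracts bits [15:4] of X without needing the 0xffff constant.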
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (N1C) {
      uint64_t ShAmt = N1C->getZExtValue();
      SDValue N0 = Node->getOperand(0);
      if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
          isa<ConstantSDNode>(N0.getOperand(1))) {
        uint64_t Mask = N0.getConstantOperandVal(1);
        Mask |= maskTrailingOnes<uint64_t>(ShAmt);
        if (Mask == 0xffff) {
          unsigned LShAmt = Subtarget->getXLen() - 16;
          SDNode *SLLI =
              CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                                     CurDAG->getTargetConstant(LShAmt, DL, VT));
          SDNode *SRLI = CurDAG->getMachineNode(
              RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
              CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    break;
  }
  case ISD::AND: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    SDValue N0 = Node->getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if (!LeftShift && N0.getOpcode() != ISD::SRL)
      break;

    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C)
      break;
    uint64_t C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    if (!C2 || C2 >= XLen)
      break;

    uint64_t C1 = N1C->getZExtValue();

    // Keep track of whether this is an andi, zext.h, or zext.w.
    bool ZExtOrANDI = isInt<12>(N1C->getSExtValue());
    if (C1 == UINT64_C(0xFFFF) &&
        (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()))
      ZExtOrANDI = true;
    if (C1 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba())
      ZExtOrANDI = true;

    // Clear irrelevant bits in the mask.
    if (LeftShift)
      C1 &= maskTrailingZeros<uint64_t>(C2);
    else
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32).
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    SDValue X = N0.getOperand(0);

    // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
    // with c3 leading zeros.
    if (!LeftShift && isMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
      if (C2 < C3) {
        // If the number of leading zeros is C2+32 this can be SRLIW.
        if (C2 + 32 == C3) {
          SDNode *SRLIW =
              CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
        // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
        //
        // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
        // legalized and goes through DAG combine.
        SDValue Y;
        if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
            selectSExti32(X, Y)) {
          SDNode *SRAIW =
              CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, Y,
                                     CurDAG->getTargetConstant(31, DL, XLenVT));
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
              CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (srli (slli x, c3-c2), c3).
        if (OneUseOrZExtW && !ZExtOrANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shl x, c2) c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
    // shifted by c2 bits with c3 leading zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));

      if (C2 + C3 < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
        // Use slli.uw when possible.
        if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
          SDNode *SLLIUW =
              CurDAG->getMachineNode(RISCV::SLLIUW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SLLIUW);
          return;
        }

        // (srli (slli x, c2+c3), c3)
        if (OneUseOrZExtW && !ZExtOrANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16)
          break;
      }
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMNANDOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
        break;
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

      // Expand to
      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16)
          break;
      }
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
                                     : RISCV::PseudoVMSLT_VX_MF8_MASK;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
                                     : RISCV::PseudoVMSLT_VX_MF4_MASK;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
                                     : RISCV::PseudoVMSLT_VX_MF2_MASK;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
                                     : RISCV::PseudoVMSLT_VX_M1_MASK;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
                                     : RISCV::PseudoVMSLT_VX_M2_MASK;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
                                     : RISCV::PseudoVMSLT_VX_M4_MASK;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
                                     : RISCV::PseudoVMSLT_VX_M8_MASK;
        break;
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
        break;
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);
      // If the MaskedOff value and the Mask are the same value, use
      // vmslt{u}.vx vt, va, x;  vmandnot.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;

    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax: {
      if (!Subtarget->hasStdExtV())
        break;

      bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
      unsigned Offset = VLMax ? 2 : 3;
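      // Operand layout is (chain, id, avl, sew, vlmul) for vsetvli and
      // (chain, id, sew, vlmul) for vsetvlimax, so SEW/VLMUL start at Offset
      // and the AVL, when present, is operand 2.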

      assert(Node->getNumOperands() == Offset + 2 &&
             "Unexpected number of operands");

      unsigned SEW =
          RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
      RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
          Node->getConstantOperandVal(Offset + 1) & 0x7);

      unsigned VTypeI = RISCVVType::encodeVTYPE(
          VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
      SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

      SDValue VLOperand;
      if (VLMax) {
        VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
      } else {
        VLOperand = Node->getOperand(2);

        if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
          uint64_t AVL = C->getZExtValue();
          if (isUInt<5>(AVL)) {
            SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
            ReplaceNode(
                Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
                                             MVT::Other, VLImm, VTypeIOp,
                                             /* Chain */ Node->getOperand(0)));
            return;
          }
        }
      }

      ReplaceNode(Node,
                  CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
                                         MVT::Other, VLOperand, VTypeIOp,
                                         /* Chain */ Node->getOperand(0)));
      return;
    }
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
921e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg7_mask:
922e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg8_mask: {
923*fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
924e8d8bef9SDimitry Andric       return;
925e8d8bef9SDimitry Andric     }
926e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg2:
927e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg3:
928e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg4:
929e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg5:
930e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg6:
931e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg7:
932e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg8:
933*fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
934*fe6060f1SDimitry Andric       return;
935e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg2:
936e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg3:
937e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg4:
938e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg5:
939e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg6:
940e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg7:
941*fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxseg8:
942*fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
943e8d8bef9SDimitry Andric       return;
944e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg2_mask:
945e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg3_mask:
946e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg4_mask:
947e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg5_mask:
948e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg6_mask:
949e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg7_mask:
950e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg8_mask:
951*fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
952*fe6060f1SDimitry Andric       return;
953e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg2_mask:
954e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg3_mask:
955e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg4_mask:
956e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg5_mask:
957e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg6_mask:
958e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg7_mask:
959*fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxseg8_mask:
960*fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
961*fe6060f1SDimitry Andric       return;
962*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg8ff:
963*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg7ff:
964*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg6ff:
965*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg5ff:
966*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg4ff:
967*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg3ff:
968*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg2ff: {
969*fe6060f1SDimitry Andric       selectVLSEGFF(Node, /*IsMasked*/ false);
970*fe6060f1SDimitry Andric       return;
971*fe6060f1SDimitry Andric     }
972*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg8ff_mask:
973*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg7ff_mask:
974*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg6ff_mask:
975*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg5ff_mask:
976*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg4ff_mask:
977*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg3ff_mask:
978*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg2ff_mask: {
979*fe6060f1SDimitry Andric       selectVLSEGFF(Node, /*IsMasked*/ true);
980*fe6060f1SDimitry Andric       return;
981*fe6060f1SDimitry Andric     }
982*fe6060f1SDimitry Andric     case Intrinsic::riscv_vloxei:
983*fe6060f1SDimitry Andric     case Intrinsic::riscv_vloxei_mask:
984*fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxei:
985*fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxei_mask: {
986*fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
987*fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vluxei_mask;
988*fe6060f1SDimitry Andric       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
989*fe6060f1SDimitry Andric                        IntNo == Intrinsic::riscv_vloxei_mask;
990*fe6060f1SDimitry Andric 
991*fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
992*fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
993*fe6060f1SDimitry Andric 
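      // For the masked forms the first operand is the merge (masked-off)
      // value; addVectorLoadStoreOperands collects the remaining pointer,
      // index, mask, VL, SEW and chain operands and reports the index vector
      // type in IndexVT.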
994*fe6060f1SDimitry Andric       unsigned CurOp = 2;
995*fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
996*fe6060f1SDimitry Andric       if (IsMasked)
997*fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
998*fe6060f1SDimitry Andric 
999*fe6060f1SDimitry Andric       MVT IndexVT;
1000*fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1001*fe6060f1SDimitry Andric                                  /*IsStridedOrIndexed*/ true, Operands,
1002*fe6060f1SDimitry Andric                                  &IndexVT);
1003*fe6060f1SDimitry Andric 
1004*fe6060f1SDimitry Andric       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1005*fe6060f1SDimitry Andric              "Element count mismatch");
1006*fe6060f1SDimitry Andric 
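      // The indexed-load pseudo is keyed on both the data LMUL and the index
      // EEW/LMUL, since the index vector may use a different element width.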
1007*fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1008*fe6060f1SDimitry Andric       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1009*fe6060f1SDimitry Andric       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1010*fe6060f1SDimitry Andric       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
1011*fe6060f1SDimitry Andric           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1012*fe6060f1SDimitry Andric           static_cast<unsigned>(IndexLMUL));
1013*fe6060f1SDimitry Andric       MachineSDNode *Load =
1014*fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1015*fe6060f1SDimitry Andric 
1016*fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1017*fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1018*fe6060f1SDimitry Andric 
1019*fe6060f1SDimitry Andric       ReplaceNode(Node, Load);
1020*fe6060f1SDimitry Andric       return;
1021*fe6060f1SDimitry Andric     }
1022*fe6060f1SDimitry Andric     case Intrinsic::riscv_vle1:
1023*fe6060f1SDimitry Andric     case Intrinsic::riscv_vle:
1024*fe6060f1SDimitry Andric     case Intrinsic::riscv_vle_mask:
1025*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlse:
1026*fe6060f1SDimitry Andric     case Intrinsic::riscv_vlse_mask: {
1027*fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1028*fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vlse_mask;
1029*fe6060f1SDimitry Andric       bool IsStrided =
1030*fe6060f1SDimitry Andric           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1031*fe6060f1SDimitry Andric 
1032*fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
1033*fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1034*fe6060f1SDimitry Andric 
1035*fe6060f1SDimitry Andric       unsigned CurOp = 2;
1036*fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
1037*fe6060f1SDimitry Andric       if (IsMasked)
1038*fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
1039*fe6060f1SDimitry Andric 
1040*fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1041*fe6060f1SDimitry Andric                                  Operands);
1042*fe6060f1SDimitry Andric 
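      // Look up the unit-stride or strided load pseudo for this SEW/LMUL
      // combination; the original memory operand is transferred so the
      // machine node still carries the memory access information.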
1043*fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1044*fe6060f1SDimitry Andric       const RISCV::VLEPseudo *P =
1045*fe6060f1SDimitry Andric           RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
1046*fe6060f1SDimitry Andric                               static_cast<unsigned>(LMUL));
1047*fe6060f1SDimitry Andric       MachineSDNode *Load =
1048*fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1049*fe6060f1SDimitry Andric 
1050*fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1051*fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1052*fe6060f1SDimitry Andric 
1053*fe6060f1SDimitry Andric       ReplaceNode(Node, Load);
1054*fe6060f1SDimitry Andric       return;
1055*fe6060f1SDimitry Andric     }
1056*fe6060f1SDimitry Andric     case Intrinsic::riscv_vleff:
1057*fe6060f1SDimitry Andric     case Intrinsic::riscv_vleff_mask: {
1058*fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1059*fe6060f1SDimitry Andric 
1060*fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
1061*fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1062*fe6060f1SDimitry Andric 
1063*fe6060f1SDimitry Andric       unsigned CurOp = 2;
1064*fe6060f1SDimitry Andric       SmallVector<SDValue, 7> Operands;
1065*fe6060f1SDimitry Andric       if (IsMasked)
1066*fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
1067*fe6060f1SDimitry Andric 
1068*fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1069*fe6060f1SDimitry Andric                                  /*IsStridedOrIndexed*/ false, Operands);
1070*fe6060f1SDimitry Andric 
1071*fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1072*fe6060f1SDimitry Andric       const RISCV::VLEPseudo *P =
1073*fe6060f1SDimitry Andric           RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
1074*fe6060f1SDimitry Andric                               static_cast<unsigned>(LMUL));
1075*fe6060f1SDimitry Andric       MachineSDNode *Load =
1076*fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
1077*fe6060f1SDimitry Andric                                  MVT::Other, MVT::Glue, Operands);
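      // The fault-only-first load also defines glue; PseudoReadVL consumes
      // that glue to read the VL written by the load, which becomes the
      // intrinsic's second result.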
1078*fe6060f1SDimitry Andric       SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
1079*fe6060f1SDimitry Andric                                               /*Glue*/ SDValue(Load, 2));
1080*fe6060f1SDimitry Andric 
1081*fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1082*fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1083*fe6060f1SDimitry Andric 
1084*fe6060f1SDimitry Andric       ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
1085*fe6060f1SDimitry Andric       ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
1086*fe6060f1SDimitry Andric       ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
1087*fe6060f1SDimitry Andric       CurDAG->RemoveDeadNode(Node);
10880b57cec5SDimitry Andric       return;
10890b57cec5SDimitry Andric     }
10900b57cec5SDimitry Andric     }
10910b57cec5SDimitry Andric     break;
10920b57cec5SDimitry Andric   }
1093e8d8bef9SDimitry Andric   case ISD::INTRINSIC_VOID: {
1094e8d8bef9SDimitry Andric     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1095e8d8bef9SDimitry Andric     switch (IntNo) {
1096e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg2:
1097e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg3:
1098e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg4:
1099e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg5:
1100e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg6:
1101e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg7:
1102e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg8: {
1103*fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
11040b57cec5SDimitry Andric       return;
11050b57cec5SDimitry Andric     }
1106e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg2_mask:
1107e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg3_mask:
1108e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg4_mask:
1109e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg5_mask:
1110e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg6_mask:
1111e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg7_mask:
1112e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg8_mask: {
1113*fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1114e8d8bef9SDimitry Andric       return;
1115e8d8bef9SDimitry Andric     }
1116e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg2:
1117e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg3:
1118e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg4:
1119e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg5:
1120e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg6:
1121e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg7:
1122e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg8: {
1123*fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1124e8d8bef9SDimitry Andric       return;
1125e8d8bef9SDimitry Andric     }
1126e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg2_mask:
1127e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg3_mask:
1128e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg4_mask:
1129e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg5_mask:
1130e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg6_mask:
1131e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg7_mask:
1132e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg8_mask: {
1133*fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1134e8d8bef9SDimitry Andric       return;
1135e8d8bef9SDimitry Andric     }
1136e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg2:
1137e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg3:
1138e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg4:
1139e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg5:
1140e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg6:
1141e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg7:
1142e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg8:
1143*fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1144*fe6060f1SDimitry Andric       return;
1145e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg2:
1146e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg3:
1147e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg4:
1148e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg5:
1149e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg6:
1150e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg7:
1151*fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxseg8:
1152*fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1153e8d8bef9SDimitry Andric       return;
1154e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg2_mask:
1155e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg3_mask:
1156e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg4_mask:
1157e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg5_mask:
1158e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg6_mask:
1159e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg7_mask:
1160e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg8_mask:
1161*fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1162*fe6060f1SDimitry Andric       return;
1163e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg2_mask:
1164e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg3_mask:
1165e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg4_mask:
1166e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg5_mask:
1167e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg6_mask:
1168e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg7_mask:
1169*fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxseg8_mask:
1170*fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1171*fe6060f1SDimitry Andric       return;
1172*fe6060f1SDimitry Andric     case Intrinsic::riscv_vsoxei:
1173*fe6060f1SDimitry Andric     case Intrinsic::riscv_vsoxei_mask:
1174*fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxei:
1175*fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxei_mask: {
1176*fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1177*fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vsuxei_mask;
1178*fe6060f1SDimitry Andric       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1179*fe6060f1SDimitry Andric                        IntNo == Intrinsic::riscv_vsoxei_mask;
1180*fe6060f1SDimitry Andric 
1181*fe6060f1SDimitry Andric       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1182*fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1183*fe6060f1SDimitry Andric 
1184*fe6060f1SDimitry Andric       unsigned CurOp = 2;
1185*fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
1186*fe6060f1SDimitry Andric       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1187*fe6060f1SDimitry Andric 
1188*fe6060f1SDimitry Andric       MVT IndexVT;
1189*fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1190*fe6060f1SDimitry Andric                                  /*IsStridedOrIndexed*/ true, Operands,
1191*fe6060f1SDimitry Andric                                  &IndexVT);
1192*fe6060f1SDimitry Andric 
1193*fe6060f1SDimitry Andric       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1194*fe6060f1SDimitry Andric              "Element count mismatch");
1195*fe6060f1SDimitry Andric 
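      // As with the indexed loads, the store pseudo is selected by the data
      // LMUL together with the index EEW and LMUL.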
1196*fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1197*fe6060f1SDimitry Andric       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1198*fe6060f1SDimitry Andric       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
1199*fe6060f1SDimitry Andric       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
1200*fe6060f1SDimitry Andric           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1201*fe6060f1SDimitry Andric           static_cast<unsigned>(IndexLMUL));
1202*fe6060f1SDimitry Andric       MachineSDNode *Store =
1203*fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1204*fe6060f1SDimitry Andric 
1205*fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1206*fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1207*fe6060f1SDimitry Andric 
1208*fe6060f1SDimitry Andric       ReplaceNode(Node, Store);
1209*fe6060f1SDimitry Andric       return;
1210*fe6060f1SDimitry Andric     }
1211*fe6060f1SDimitry Andric     case Intrinsic::riscv_vse1:
1212*fe6060f1SDimitry Andric     case Intrinsic::riscv_vse:
1213*fe6060f1SDimitry Andric     case Intrinsic::riscv_vse_mask:
1214*fe6060f1SDimitry Andric     case Intrinsic::riscv_vsse:
1215*fe6060f1SDimitry Andric     case Intrinsic::riscv_vsse_mask: {
1216*fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1217*fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vsse_mask;
1218*fe6060f1SDimitry Andric       bool IsStrided =
1219*fe6060f1SDimitry Andric           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1220*fe6060f1SDimitry Andric 
1221*fe6060f1SDimitry Andric       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1222*fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1223*fe6060f1SDimitry Andric 
1224*fe6060f1SDimitry Andric       unsigned CurOp = 2;
1225*fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
1226*fe6060f1SDimitry Andric       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1227*fe6060f1SDimitry Andric 
1228*fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1229*fe6060f1SDimitry Andric                                  Operands);
1230*fe6060f1SDimitry Andric 
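      // Select the unit-stride or strided store pseudo for this SEW/LMUL and
      // attach the original memory operand.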
1231*fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1232*fe6060f1SDimitry Andric       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1233*fe6060f1SDimitry Andric           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1234*fe6060f1SDimitry Andric       MachineSDNode *Store =
1235*fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1236*fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1237*fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1238*fe6060f1SDimitry Andric 
1239*fe6060f1SDimitry Andric       ReplaceNode(Node, Store);
1240e8d8bef9SDimitry Andric       return;
1241e8d8bef9SDimitry Andric     }
1242e8d8bef9SDimitry Andric     }
1243e8d8bef9SDimitry Andric     break;
1244e8d8bef9SDimitry Andric   }
1245*fe6060f1SDimitry Andric   case ISD::BITCAST: {
1246*fe6060f1SDimitry Andric     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1247*fe6060f1SDimitry Andric     // Just drop bitcasts between vectors if both are fixed or both are
1248*fe6060f1SDimitry Andric     // scalable.
1249*fe6060f1SDimitry Andric     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1250*fe6060f1SDimitry Andric         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1251*fe6060f1SDimitry Andric       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1252*fe6060f1SDimitry Andric       CurDAG->RemoveDeadNode(Node);
1253e8d8bef9SDimitry Andric       return;
1254e8d8bef9SDimitry Andric     }
1255*fe6060f1SDimitry Andric     break;
1256*fe6060f1SDimitry Andric   }
1257*fe6060f1SDimitry Andric   case ISD::INSERT_SUBVECTOR: {
1258*fe6060f1SDimitry Andric     SDValue V = Node->getOperand(0);
1259*fe6060f1SDimitry Andric     SDValue SubV = Node->getOperand(1);
1260*fe6060f1SDimitry Andric     SDLoc DL(SubV);
1261*fe6060f1SDimitry Andric     auto Idx = Node->getConstantOperandVal(2);
1262*fe6060f1SDimitry Andric     MVT SubVecVT = SubV.getSimpleValueType();
1263*fe6060f1SDimitry Andric 
1264*fe6060f1SDimitry Andric     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1265*fe6060f1SDimitry Andric     MVT SubVecContainerVT = SubVecVT;
1266*fe6060f1SDimitry Andric     // Establish the correct scalable-vector types for any fixed-length type.
1267*fe6060f1SDimitry Andric     if (SubVecVT.isFixedLengthVector())
1268*fe6060f1SDimitry Andric       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1269*fe6060f1SDimitry Andric     if (VT.isFixedLengthVector())
1270*fe6060f1SDimitry Andric       VT = TLI.getContainerForFixedLengthVector(VT);
1271*fe6060f1SDimitry Andric 
1272*fe6060f1SDimitry Andric     const auto *TRI = Subtarget->getRegisterInfo();
1273*fe6060f1SDimitry Andric     unsigned SubRegIdx;
1274*fe6060f1SDimitry Andric     std::tie(SubRegIdx, Idx) =
1275*fe6060f1SDimitry Andric         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1276*fe6060f1SDimitry Andric             VT, SubVecContainerVT, Idx, TRI);
1277*fe6060f1SDimitry Andric 
1278*fe6060f1SDimitry Andric     // If the Idx hasn't been completely eliminated then this is a subvector
1279*fe6060f1SDimitry Andric     // insert which doesn't naturally align to a vector register. These must
1280*fe6060f1SDimitry Andric     // be handled using instructions to manipulate the vector registers.
1281*fe6060f1SDimitry Andric     if (Idx != 0)
1282*fe6060f1SDimitry Andric       break;
1283*fe6060f1SDimitry Andric 
1284*fe6060f1SDimitry Andric     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1285*fe6060f1SDimitry Andric     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1286*fe6060f1SDimitry Andric                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1287*fe6060f1SDimitry Andric                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1288*fe6060f1SDimitry Andric     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1289*fe6060f1SDimitry Andric     assert((!IsSubVecPartReg || V.isUndef()) &&
1290*fe6060f1SDimitry Andric            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1291*fe6060f1SDimitry Andric            "the subvector is smaller than a full-sized register");
1292*fe6060f1SDimitry Andric 
1293*fe6060f1SDimitry Andric     // If we haven't set a SubRegIdx, then we must be going between
1294*fe6060f1SDimitry Andric     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1295*fe6060f1SDimitry Andric     if (SubRegIdx == RISCV::NoSubRegister) {
1296*fe6060f1SDimitry Andric       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1297*fe6060f1SDimitry Andric       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1298*fe6060f1SDimitry Andric                  InRegClassID &&
1299*fe6060f1SDimitry Andric              "Unexpected subvector insertion");
1300*fe6060f1SDimitry Andric       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1301*fe6060f1SDimitry Andric       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1302*fe6060f1SDimitry Andric                                                DL, VT, SubV, RC);
1303*fe6060f1SDimitry Andric       ReplaceNode(Node, NewNode);
1304*fe6060f1SDimitry Andric       return;
1305*fe6060f1SDimitry Andric     }
1306*fe6060f1SDimitry Andric 
1307*fe6060f1SDimitry Andric     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1308*fe6060f1SDimitry Andric     ReplaceNode(Node, Insert.getNode());
1309*fe6060f1SDimitry Andric     return;
1310*fe6060f1SDimitry Andric   }
1311*fe6060f1SDimitry Andric   case ISD::EXTRACT_SUBVECTOR: {
1312*fe6060f1SDimitry Andric     SDValue V = Node->getOperand(0);
1313*fe6060f1SDimitry Andric     auto Idx = Node->getConstantOperandVal(1);
1314*fe6060f1SDimitry Andric     MVT InVT = V.getSimpleValueType();
1315*fe6060f1SDimitry Andric     SDLoc DL(V);
1316*fe6060f1SDimitry Andric 
1317*fe6060f1SDimitry Andric     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1318*fe6060f1SDimitry Andric     MVT SubVecContainerVT = VT;
1319*fe6060f1SDimitry Andric     // Establish the correct scalable-vector types for any fixed-length type.
1320*fe6060f1SDimitry Andric     if (VT.isFixedLengthVector())
1321*fe6060f1SDimitry Andric       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1322*fe6060f1SDimitry Andric     if (InVT.isFixedLengthVector())
1323*fe6060f1SDimitry Andric       InVT = TLI.getContainerForFixedLengthVector(InVT);
1324*fe6060f1SDimitry Andric 
1325*fe6060f1SDimitry Andric     const auto *TRI = Subtarget->getRegisterInfo();
1326*fe6060f1SDimitry Andric     unsigned SubRegIdx;
1327*fe6060f1SDimitry Andric     std::tie(SubRegIdx, Idx) =
1328*fe6060f1SDimitry Andric         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1329*fe6060f1SDimitry Andric             InVT, SubVecContainerVT, Idx, TRI);
1330*fe6060f1SDimitry Andric 
1331*fe6060f1SDimitry Andric     // If the Idx hasn't been completely eliminated then this is a subvector
1332*fe6060f1SDimitry Andric     // extract which doesn't naturally align to a vector register. These must
1333*fe6060f1SDimitry Andric     // be handled using instructions to manipulate the vector registers.
1334*fe6060f1SDimitry Andric     if (Idx != 0)
1335*fe6060f1SDimitry Andric       break;
1336*fe6060f1SDimitry Andric 
1337*fe6060f1SDimitry Andric     // If we haven't set a SubRegIdx, then we must be going between
1338*fe6060f1SDimitry Andric     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1339*fe6060f1SDimitry Andric     if (SubRegIdx == RISCV::NoSubRegister) {
1340*fe6060f1SDimitry Andric       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1341*fe6060f1SDimitry Andric       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1342*fe6060f1SDimitry Andric                  InRegClassID &&
1343*fe6060f1SDimitry Andric              "Unexpected subvector extraction");
1344*fe6060f1SDimitry Andric       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1345*fe6060f1SDimitry Andric       SDNode *NewNode =
1346*fe6060f1SDimitry Andric           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1347*fe6060f1SDimitry Andric       ReplaceNode(Node, NewNode);
1348*fe6060f1SDimitry Andric       return;
1349*fe6060f1SDimitry Andric     }
1350*fe6060f1SDimitry Andric 
1351*fe6060f1SDimitry Andric     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1352*fe6060f1SDimitry Andric     ReplaceNode(Node, Extract.getNode());
1353*fe6060f1SDimitry Andric     return;
1354*fe6060f1SDimitry Andric   }
1355*fe6060f1SDimitry Andric   case RISCVISD::VMV_V_X_VL:
1356*fe6060f1SDimitry Andric   case RISCVISD::VFMV_V_F_VL: {
1357*fe6060f1SDimitry Andric     // Try to match splat of a scalar load to a strided load with stride of x0.
1358*fe6060f1SDimitry Andric     SDValue Src = Node->getOperand(0);
1359*fe6060f1SDimitry Andric     auto *Ld = dyn_cast<LoadSDNode>(Src);
1360*fe6060f1SDimitry Andric     if (!Ld)
1361*fe6060f1SDimitry Andric       break;
1362*fe6060f1SDimitry Andric     EVT MemVT = Ld->getMemoryVT();
1363*fe6060f1SDimitry Andric     // The memory VT should be the same size as the element type.
1364*fe6060f1SDimitry Andric     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1365*fe6060f1SDimitry Andric       break;
1366*fe6060f1SDimitry Andric     if (!IsProfitableToFold(Src, Node, Node) ||
1367*fe6060f1SDimitry Andric         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1368*fe6060f1SDimitry Andric       break;
1369*fe6060f1SDimitry Andric 
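    // Emit a zero-stride (X0) load: every element is loaded from the same
    // scalar address, which materializes the splat directly from memory.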
1370*fe6060f1SDimitry Andric     SDValue VL;
1371*fe6060f1SDimitry Andric     selectVLOp(Node->getOperand(1), VL);
1372*fe6060f1SDimitry Andric 
1373*fe6060f1SDimitry Andric     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1374*fe6060f1SDimitry Andric     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1375*fe6060f1SDimitry Andric 
1376*fe6060f1SDimitry Andric     SDValue Operands[] = {Ld->getBasePtr(),
1377*fe6060f1SDimitry Andric                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1378*fe6060f1SDimitry Andric                           Ld->getChain()};
1379*fe6060f1SDimitry Andric 
1380*fe6060f1SDimitry Andric     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1381*fe6060f1SDimitry Andric     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1382*fe6060f1SDimitry Andric         /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
1383*fe6060f1SDimitry Andric         static_cast<unsigned>(LMUL));
1384*fe6060f1SDimitry Andric     MachineSDNode *Load =
1385*fe6060f1SDimitry Andric         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1386*fe6060f1SDimitry Andric 
1387*fe6060f1SDimitry Andric     if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1388*fe6060f1SDimitry Andric       CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1389*fe6060f1SDimitry Andric 
1390*fe6060f1SDimitry Andric     ReplaceNode(Node, Load);
1391e8d8bef9SDimitry Andric     return;
1392e8d8bef9SDimitry Andric   }
1393e8d8bef9SDimitry Andric   }
13940b57cec5SDimitry Andric 
13950b57cec5SDimitry Andric   // Select the default instruction.
13960b57cec5SDimitry Andric   SelectCode(Node);
13970b57cec5SDimitry Andric }
13980b57cec5SDimitry Andric 
13990b57cec5SDimitry Andric bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
14000b57cec5SDimitry Andric     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
14010b57cec5SDimitry Andric   switch (ConstraintID) {
14020b57cec5SDimitry Andric   case InlineAsm::Constraint_m:
14030b57cec5SDimitry Andric     // We only support simple memory operands that have a single address
14040b57cec5SDimitry Andric     // operand and need no special handling.
14050b57cec5SDimitry Andric     OutOps.push_back(Op);
14060b57cec5SDimitry Andric     return false;
14070b57cec5SDimitry Andric   case InlineAsm::Constraint_A:
14080b57cec5SDimitry Andric     OutOps.push_back(Op);
14090b57cec5SDimitry Andric     return false;
14100b57cec5SDimitry Andric   default:
14110b57cec5SDimitry Andric     break;
14120b57cec5SDimitry Andric   }
14130b57cec5SDimitry Andric 
14140b57cec5SDimitry Andric   return true;
14150b57cec5SDimitry Andric }
14160b57cec5SDimitry Andric 
14170b57cec5SDimitry Andric bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1418*fe6060f1SDimitry Andric   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
14190b57cec5SDimitry Andric     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
14200b57cec5SDimitry Andric     return true;
14210b57cec5SDimitry Andric   }
14220b57cec5SDimitry Andric   return false;
14230b57cec5SDimitry Andric }
14240b57cec5SDimitry Andric 
1425*fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1426*fe6060f1SDimitry Andric   // If this is a FrameIndex, select it directly. Otherwise just let it get
1427*fe6060f1SDimitry Andric   // selected to a register independently.
1428*fe6060f1SDimitry Andric   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1429*fe6060f1SDimitry Andric     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1430*fe6060f1SDimitry Andric   else
1431*fe6060f1SDimitry Andric     Base = Addr;
1432*fe6060f1SDimitry Andric   return true;
1433e8d8bef9SDimitry Andric }
1434e8d8bef9SDimitry Andric 
1435*fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1436*fe6060f1SDimitry Andric                                         SDValue &ShAmt) {
1437*fe6060f1SDimitry Andric   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1438*fe6060f1SDimitry Andric   // amount. If there is an AND on the shift amount, we can bypass it if it
1439*fe6060f1SDimitry Andric   // doesn't affect any of those bits.
1440*fe6060f1SDimitry Andric   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1441*fe6060f1SDimitry Andric     const APInt &AndMask = N->getConstantOperandAPInt(1);
1442979e22ffSDimitry Andric 
1443*fe6060f1SDimitry Andric     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1444*fe6060f1SDimitry Andric     // mask that covers the bits needed to represent all shift amounts.
1445*fe6060f1SDimitry Andric     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1446*fe6060f1SDimitry Andric     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1447e8d8bef9SDimitry Andric 
1448*fe6060f1SDimitry Andric     if (ShMask.isSubsetOf(AndMask)) {
1449*fe6060f1SDimitry Andric       ShAmt = N.getOperand(0);
1450*fe6060f1SDimitry Andric       return true;
1451e8d8bef9SDimitry Andric     }
1452e8d8bef9SDimitry Andric 
1453*fe6060f1SDimitry Andric     // SimplifyDemandedBits may have optimized the mask, so try restoring any
1454*fe6060f1SDimitry Andric     // bits that are known zero.
1455*fe6060f1SDimitry Andric     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1456*fe6060f1SDimitry Andric     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1457*fe6060f1SDimitry Andric       ShAmt = N.getOperand(0);
1458*fe6060f1SDimitry Andric       return true;
1459*fe6060f1SDimitry Andric     }
1460*fe6060f1SDimitry Andric   }
1461*fe6060f1SDimitry Andric 
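  // No AND to look through (or it could not be bypassed); use the shift
  // amount operand as-is.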
1462*fe6060f1SDimitry Andric   ShAmt = N;
1463*fe6060f1SDimitry Andric   return true;
1464*fe6060f1SDimitry Andric }
1465*fe6060f1SDimitry Andric 
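// Return a value known to be sign-extended from 32 bits, either by looking
// through an explicit sign_extend_inreg or by proving the upper bits are
// already sign bits.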
1466*fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1467*fe6060f1SDimitry Andric   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1468*fe6060f1SDimitry Andric       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1469*fe6060f1SDimitry Andric     Val = N.getOperand(0);
1470*fe6060f1SDimitry Andric     return true;
1471*fe6060f1SDimitry Andric   }
1472*fe6060f1SDimitry Andric   MVT VT = N.getSimpleValueType();
1473*fe6060f1SDimitry Andric   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1474*fe6060f1SDimitry Andric     Val = N;
1475*fe6060f1SDimitry Andric     return true;
1476*fe6060f1SDimitry Andric   }
1477*fe6060f1SDimitry Andric 
1478*fe6060f1SDimitry Andric   return false;
1479*fe6060f1SDimitry Andric }
1480*fe6060f1SDimitry Andric 
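// Likewise for zero extension: strip an AND with 0xFFFFFFFF, or accept a
// value whose bits above bit 31 are known to be zero.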
1481*fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1482*fe6060f1SDimitry Andric   if (N.getOpcode() == ISD::AND) {
1483*fe6060f1SDimitry Andric     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1484*fe6060f1SDimitry Andric     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1485*fe6060f1SDimitry Andric       Val = N.getOperand(0);
1486*fe6060f1SDimitry Andric       return true;
1487*fe6060f1SDimitry Andric     }
1488*fe6060f1SDimitry Andric   }
1489*fe6060f1SDimitry Andric   MVT VT = N.getSimpleValueType();
1490*fe6060f1SDimitry Andric   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1491*fe6060f1SDimitry Andric   if (CurDAG->MaskedValueIsZero(N, Mask)) {
1492*fe6060f1SDimitry Andric     Val = N;
1493*fe6060f1SDimitry Andric     return true;
1494*fe6060f1SDimitry Andric   }
1495*fe6060f1SDimitry Andric 
1496*fe6060f1SDimitry Andric   return false;
1497*fe6060f1SDimitry Andric }
1498*fe6060f1SDimitry Andric 
1499*fe6060f1SDimitry Andric // Select VL as a 5-bit immediate or a value that will become a register. This
1500*fe6060f1SDimitry Andric // allows us to choose between VSETIVLI or VSETVLI later.
1501d409305fSDimitry Andric bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1502d409305fSDimitry Andric   auto *C = dyn_cast<ConstantSDNode>(N);
1503*fe6060f1SDimitry Andric   if (C && isUInt<5>(C->getZExtValue()))
1504*fe6060f1SDimitry Andric     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1505*fe6060f1SDimitry Andric                                    N->getValueType(0));
1506d409305fSDimitry Andric   else
1507d409305fSDimitry Andric     VL = N;
1508d409305fSDimitry Andric 
1509d409305fSDimitry Andric   return true;
1510d409305fSDimitry Andric }
1511d409305fSDimitry Andric 
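// Match a splat formed by any of the splat-style nodes and return the scalar
// operand being splatted.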
1512e8d8bef9SDimitry Andric bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1513e8d8bef9SDimitry Andric   if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1514*fe6060f1SDimitry Andric       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1515*fe6060f1SDimitry Andric       N.getOpcode() != RISCVISD::VMV_V_X_VL)
1516e8d8bef9SDimitry Andric     return false;
1517e8d8bef9SDimitry Andric   SplatVal = N.getOperand(0);
1518979e22ffSDimitry Andric   return true;
1519979e22ffSDimitry Andric }
1520e8d8bef9SDimitry Andric 
1521*fe6060f1SDimitry Andric using ValidateFn = bool (*)(int64_t);
1522*fe6060f1SDimitry Andric 
1523*fe6060f1SDimitry Andric static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1524*fe6060f1SDimitry Andric                                    SelectionDAG &DAG,
1525*fe6060f1SDimitry Andric                                    const RISCVSubtarget &Subtarget,
1526*fe6060f1SDimitry Andric                                    ValidateFn ValidateImm) {
1527e8d8bef9SDimitry Andric   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1528*fe6060f1SDimitry Andric        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1529*fe6060f1SDimitry Andric        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1530e8d8bef9SDimitry Andric       !isa<ConstantSDNode>(N.getOperand(0)))
1531979e22ffSDimitry Andric     return false;
1532e8d8bef9SDimitry Andric 
1533e8d8bef9SDimitry Andric   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1534e8d8bef9SDimitry Andric 
1535*fe6060f1SDimitry Andric   // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1536*fe6060f1SDimitry Andric   // share semantics when the operand type is wider than the resulting vector
1537*fe6060f1SDimitry Andric   // element type: an implicit truncation first takes place. Therefore, perform
1538*fe6060f1SDimitry Andric   // a manual truncation/sign-extension in order to ignore any truncated bits
1539*fe6060f1SDimitry Andric   // and catch any zero-extended immediate.
1540e8d8bef9SDimitry Andric   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1541e8d8bef9SDimitry Andric   // sign-extending to (XLenVT -1).
1542*fe6060f1SDimitry Andric   MVT XLenVT = Subtarget.getXLenVT();
1543e8d8bef9SDimitry Andric   assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1544e8d8bef9SDimitry Andric          "Unexpected splat operand type");
1545*fe6060f1SDimitry Andric   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1546*fe6060f1SDimitry Andric   if (EltVT.bitsLT(XLenVT))
1547e8d8bef9SDimitry Andric     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1548979e22ffSDimitry Andric 
1549*fe6060f1SDimitry Andric   if (!ValidateImm(SplatImm))
1550e8d8bef9SDimitry Andric     return false;
1551979e22ffSDimitry Andric 
1552*fe6060f1SDimitry Andric   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1553979e22ffSDimitry Andric   return true;
1554979e22ffSDimitry Andric }
1555e8d8bef9SDimitry Andric 
1556*fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1557*fe6060f1SDimitry Andric   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1558*fe6060f1SDimitry Andric                                 [](int64_t Imm) { return isInt<5>(Imm); });
1559*fe6060f1SDimitry Andric }
1560*fe6060f1SDimitry Andric 
1561*fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1562*fe6060f1SDimitry Andric   return selectVSplatSimmHelper(
1563*fe6060f1SDimitry Andric       N, SplatVal, *CurDAG, *Subtarget,
1564*fe6060f1SDimitry Andric       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1565*fe6060f1SDimitry Andric }
1566*fe6060f1SDimitry Andric 
1567*fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1568*fe6060f1SDimitry Andric                                                       SDValue &SplatVal) {
1569*fe6060f1SDimitry Andric   return selectVSplatSimmHelper(
1570*fe6060f1SDimitry Andric       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1571*fe6060f1SDimitry Andric         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1572*fe6060f1SDimitry Andric       });
1573*fe6060f1SDimitry Andric }
1574*fe6060f1SDimitry Andric 
1575e8d8bef9SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
1576e8d8bef9SDimitry Andric   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1577*fe6060f1SDimitry Andric        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1578*fe6060f1SDimitry Andric        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1579e8d8bef9SDimitry Andric       !isa<ConstantSDNode>(N.getOperand(0)))
1580979e22ffSDimitry Andric     return false;
1581979e22ffSDimitry Andric 
1582e8d8bef9SDimitry Andric   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1583979e22ffSDimitry Andric 
1584e8d8bef9SDimitry Andric   if (!isUInt<5>(SplatImm))
1585e8d8bef9SDimitry Andric     return false;
1586e8d8bef9SDimitry Andric 
1587e8d8bef9SDimitry Andric   SplatVal =
1588e8d8bef9SDimitry Andric       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1589e8d8bef9SDimitry Andric 
1590979e22ffSDimitry Andric   return true;
1591979e22ffSDimitry Andric }
1592979e22ffSDimitry Andric 
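// Match a constant whose value, truncated to Width bits and sign-extended,
// fits in a signed 5-bit immediate.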
1593*fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
1594*fe6060f1SDimitry Andric                                        SDValue &Imm) {
1595*fe6060f1SDimitry Andric   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1596*fe6060f1SDimitry Andric     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1597*fe6060f1SDimitry Andric 
1598*fe6060f1SDimitry Andric     if (!isInt<5>(ImmVal))
1599*fe6060f1SDimitry Andric       return false;
1600*fe6060f1SDimitry Andric 
1601*fe6060f1SDimitry Andric     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1602*fe6060f1SDimitry Andric     return true;
1603*fe6060f1SDimitry Andric   }
1604*fe6060f1SDimitry Andric 
1605*fe6060f1SDimitry Andric   return false;
1606*fe6060f1SDimitry Andric }
1607*fe6060f1SDimitry Andric 
16080b57cec5SDimitry Andric // Merge an ADDI into the offset of a load/store instruction where possible.
16095ffd83dbSDimitry Andric // (load (addi base, off1), off2) -> (load base, off1+off2)
16105ffd83dbSDimitry Andric // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
16115ffd83dbSDimitry Andric // This is possible when off1+off2 fits a 12-bit immediate.
16120b57cec5SDimitry Andric void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
16130b57cec5SDimitry Andric   SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
16140b57cec5SDimitry Andric   ++Position;
16150b57cec5SDimitry Andric 
16160b57cec5SDimitry Andric   while (Position != CurDAG->allnodes_begin()) {
16170b57cec5SDimitry Andric     SDNode *N = &*--Position;
16180b57cec5SDimitry Andric     // Skip dead nodes and any non-machine opcodes.
16190b57cec5SDimitry Andric     if (N->use_empty() || !N->isMachineOpcode())
16200b57cec5SDimitry Andric       continue;
16210b57cec5SDimitry Andric 
16220b57cec5SDimitry Andric     int OffsetOpIdx;
16230b57cec5SDimitry Andric     int BaseOpIdx;
16240b57cec5SDimitry Andric 
16250b57cec5SDimitry Andric     // Only attempt this optimisation for I-type loads and S-type stores.
16260b57cec5SDimitry Andric     switch (N->getMachineOpcode()) {
16270b57cec5SDimitry Andric     default:
16280b57cec5SDimitry Andric       continue;
16290b57cec5SDimitry Andric     case RISCV::LB:
16300b57cec5SDimitry Andric     case RISCV::LH:
16310b57cec5SDimitry Andric     case RISCV::LW:
16320b57cec5SDimitry Andric     case RISCV::LBU:
16330b57cec5SDimitry Andric     case RISCV::LHU:
16340b57cec5SDimitry Andric     case RISCV::LWU:
16350b57cec5SDimitry Andric     case RISCV::LD:
1636e8d8bef9SDimitry Andric     case RISCV::FLH:
16370b57cec5SDimitry Andric     case RISCV::FLW:
16380b57cec5SDimitry Andric     case RISCV::FLD:
16390b57cec5SDimitry Andric       BaseOpIdx = 0;
16400b57cec5SDimitry Andric       OffsetOpIdx = 1;
16410b57cec5SDimitry Andric       break;
16420b57cec5SDimitry Andric     case RISCV::SB:
16430b57cec5SDimitry Andric     case RISCV::SH:
16440b57cec5SDimitry Andric     case RISCV::SW:
16450b57cec5SDimitry Andric     case RISCV::SD:
1646e8d8bef9SDimitry Andric     case RISCV::FSH:
16470b57cec5SDimitry Andric     case RISCV::FSW:
16480b57cec5SDimitry Andric     case RISCV::FSD:
16490b57cec5SDimitry Andric       BaseOpIdx = 1;
16500b57cec5SDimitry Andric       OffsetOpIdx = 2;
16510b57cec5SDimitry Andric       break;
16520b57cec5SDimitry Andric     }
16530b57cec5SDimitry Andric 
16545ffd83dbSDimitry Andric     if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
16550b57cec5SDimitry Andric       continue;
16560b57cec5SDimitry Andric 
16570b57cec5SDimitry Andric     SDValue Base = N->getOperand(BaseOpIdx);
16580b57cec5SDimitry Andric 
16590b57cec5SDimitry Andric     // If the base is an ADDI, we can merge it in to the load/store.
16600b57cec5SDimitry Andric     if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
16610b57cec5SDimitry Andric       continue;
16620b57cec5SDimitry Andric 
16630b57cec5SDimitry Andric     SDValue ImmOperand = Base.getOperand(1);
16645ffd83dbSDimitry Andric     uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
16650b57cec5SDimitry Andric 
1666*fe6060f1SDimitry Andric     if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
16675ffd83dbSDimitry Andric       int64_t Offset1 = Const->getSExtValue();
16685ffd83dbSDimitry Andric       int64_t CombinedOffset = Offset1 + Offset2;
16695ffd83dbSDimitry Andric       if (!isInt<12>(CombinedOffset))
16705ffd83dbSDimitry Andric         continue;
16715ffd83dbSDimitry Andric       ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
16725ffd83dbSDimitry Andric                                              ImmOperand.getValueType());
1673*fe6060f1SDimitry Andric     } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
16745ffd83dbSDimitry Andric       // If the off1 in (addi base, off1) is a global variable's address (its
16755ffd83dbSDimitry Andric       // low part, really), then we can rely on the alignment of that variable
16765ffd83dbSDimitry Andric       // to provide a margin of safety before off1 can overflow the 12 bits.
16775ffd83dbSDimitry Andric       // Check if off2 falls within that margin; if so off1+off2 can't overflow.
16785ffd83dbSDimitry Andric       const DataLayout &DL = CurDAG->getDataLayout();
16795ffd83dbSDimitry Andric       Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
16805ffd83dbSDimitry Andric       if (Offset2 != 0 && Alignment <= Offset2)
16815ffd83dbSDimitry Andric         continue;
16825ffd83dbSDimitry Andric       int64_t Offset1 = GA->getOffset();
16835ffd83dbSDimitry Andric       int64_t CombinedOffset = Offset1 + Offset2;
16840b57cec5SDimitry Andric       ImmOperand = CurDAG->getTargetGlobalAddress(
16850b57cec5SDimitry Andric           GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
16865ffd83dbSDimitry Andric           CombinedOffset, GA->getTargetFlags());
1687*fe6060f1SDimitry Andric     } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
16885ffd83dbSDimitry Andric       // Ditto.
16895ffd83dbSDimitry Andric       Align Alignment = CP->getAlign();
16905ffd83dbSDimitry Andric       if (Offset2 != 0 && Alignment <= Offset2)
16915ffd83dbSDimitry Andric         continue;
16925ffd83dbSDimitry Andric       int64_t Offset1 = CP->getOffset();
16935ffd83dbSDimitry Andric       int64_t CombinedOffset = Offset1 + Offset2;
16945ffd83dbSDimitry Andric       ImmOperand = CurDAG->getTargetConstantPool(
16955ffd83dbSDimitry Andric           CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
16965ffd83dbSDimitry Andric           CombinedOffset, CP->getTargetFlags());
16970b57cec5SDimitry Andric     } else {
16980b57cec5SDimitry Andric       continue;
16990b57cec5SDimitry Andric     }
17000b57cec5SDimitry Andric 
17010b57cec5SDimitry Andric     LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
17020b57cec5SDimitry Andric     LLVM_DEBUG(Base->dump(CurDAG));
17030b57cec5SDimitry Andric     LLVM_DEBUG(dbgs() << "\nN: ");
17040b57cec5SDimitry Andric     LLVM_DEBUG(N->dump(CurDAG));
17050b57cec5SDimitry Andric     LLVM_DEBUG(dbgs() << "\n");
17060b57cec5SDimitry Andric 
17070b57cec5SDimitry Andric     // Modify the offset operand of the load/store.
17080b57cec5SDimitry Andric     if (BaseOpIdx == 0) // Load
17090b57cec5SDimitry Andric       CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
17100b57cec5SDimitry Andric                                  N->getOperand(2));
17110b57cec5SDimitry Andric     else // Store
17120b57cec5SDimitry Andric       CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
17130b57cec5SDimitry Andric                                  ImmOperand, N->getOperand(3));
17140b57cec5SDimitry Andric 
17150b57cec5SDimitry Andric     // The add-immediate may now be dead, in which case remove it.
17160b57cec5SDimitry Andric     if (Base.getNode()->use_empty())
17170b57cec5SDimitry Andric       CurDAG->RemoveDeadNode(Base.getNode());
17180b57cec5SDimitry Andric   }
17190b57cec5SDimitry Andric }
17200b57cec5SDimitry Andric 
17210b57cec5SDimitry Andric // This pass converts a legalized DAG into a RISCV-specific DAG, ready
17220b57cec5SDimitry Andric // for instruction scheduling.
17230b57cec5SDimitry Andric FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
17240b57cec5SDimitry Andric   return new RISCVDAGToDAGISel(TM);
17250b57cec5SDimitry Andric }
1726