//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"

namespace llvm {
namespace RISCV {
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm

void RISCVDAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
    // load. Done after lowering and combining so that we have a chance to
    // optimize this to VMV_V_X_VL when the upper bits aren't needed.
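    // For example, on RV32 the (Lo, Hi) pair is stored to an 8-byte stack
    // slot and reloaded with a stride-0 vlse64, which rereads the same 8
    // bytes for every element (illustrative):
    //   sw lo, 0(slot); sw hi, 4(slot); vlse64.v vd, (slot), zero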
    if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
      continue;

    assert(N->getNumOperands() == 3 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
    SDValue Lo = N->getOperand(0);
    SDValue Hi = N->getOperand(1);
    SDValue VL = N->getOperand(2);
    assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
           "Unexpected VTs!");
    MachineFunction &MF = CurDAG->getMachineFunction();
    RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
    SDLoc DL(N);
    // We use the same frame index we use for moving two i32s into a 64-bit
    // FPR. This is an analogous operation.
    int FI = FuncInfo->getMoveF64FrameIndex(MF);
    MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    SDValue StackSlot =
        CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

    SDValue Chain = CurDAG->getEntryNode();
    Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

    SDValue OffsetSlot =
        CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
    Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                          Align(8));

    Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

    SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
    SDValue IntID =
        CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
    SDValue Ops[] = {Chain, IntID, StackSlot,
                     CurDAG->getRegister(RISCV::X0, MVT::i64), VL};

    SDValue Result = CurDAG->getMemIntrinsicNode(
        ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
        MachineMemOperand::MOLoad);
    // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
    // vlse we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  doPeepholeLoadStoreADDI();
}

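// Materialize Imm into a GPR using the LUI/ADDI(W)/SLLI/ADDUW sequence
// computed by RISCVMatInt. E.g. materializing 0x12345678 yields
//   lui  a0, 0x12345
//   addi a0, a0, 0x678
// (register assignment is illustrative; it is chosen later).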
static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
                         const RISCVSubtarget &Subtarget) {
  MVT XLenVT = Subtarget.getXLenVT();
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());

  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
    if (Inst.Opc == RISCV::LUI)
      Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
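    // ADDUW with X0 as the second operand zero-extends the low 32 bits of
    // SrcReg (i.e. zext.w).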
    else if (Inst.Opc == RISCV::ADDUW)
      Result = CurDAG->getMachineNode(RISCV::ADDUW, DL, XLenVT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, XLenVT));
    else
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}

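// Glue NF vector registers into a single tuple register with REG_SEQUENCE.
// The operand layout is {RegClassID, Regs[0], SubReg0, Regs[1], SubReg0+1,
// ...}, relying on the subregister indices within each tuple class being
// consecutive.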
static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                               unsigned RegClassID, unsigned SubReg0) {
  assert(Regs.size() >= 2 && Regs.size() <= 8);

  SDLoc DL(Regs[0]);
  SmallVector<SDValue, 8> Ops;

  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));

  for (unsigned I = 0; I < Regs.size(); ++I) {
    Ops.push_back(Regs[I]);
    Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
  }
  SDNode *N =
      CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  static const unsigned RegClassIDs[] = {
      RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
      RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
      RISCV::VRN8M1RegClassID};

  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
}

static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                         RISCV::VRN3M2RegClassID,
                                         RISCV::VRN4M2RegClassID};

  return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
}

static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                             unsigned NF) {
  return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
                         RISCV::sub_vrm4_0);
}

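// Fractional LMULs still occupy a full vector register, so they share the M1
// tuple classes. E.g. NF=2 at LMUL_2 builds a VRN2M2 tuple from sub_vrm2_0
// and sub_vrm2_1.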
static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                           unsigned NF, RISCVII::VLMUL LMUL) {
  switch (LMUL) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    return createM1Tuple(CurDAG, Regs, NF);
  case RISCVII::VLMUL::LMUL_2:
    return createM2Tuple(CurDAG, Regs, NF);
  case RISCVII::VLMUL::LMUL_4:
    return createM4Tuple(CurDAG, Regs, NF);
  }
}

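// Append the trailing operands shared by the vector load/store pseudos, in
// order: base, [stride or index], [mask register V0], VL, log2(SEW), chain
// and, for masked operations, the glue from copying the mask into V0.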
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}

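// Select a segment load intrinsic to a VLSEG pseudo, e.g. a masked vlseg2e32
// at LMUL=1 to PseudoVLSEG2E32_V_M1_MASK (illustrative naming). The NF
// results are then extracted from the tuple result with sub_vrm1_0,
// sub_vrm1_1, etc.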
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               MVT::Other, MVT::Glue, Operands);
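  // A fault-only-first load may truncate VL; PseudoReadVL reads the updated
  // vl CSR, and the glue operand ties it to the load so vl cannot change in
  // between.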
  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                          /*Glue*/ SDValue(Load, 2));

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;
  if (IsMasked) {
    SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                                 Node->op_begin() + CurOp + NF);
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
    CurOp += NF;
  }

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

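// Select a segment store intrinsic to a VSSEG pseudo, e.g. vsseg2e32 at
// LMUL=1 to PseudoVSSEG2E32_V_M1 (illustrative naming). The NF source
// registers are first combined into a tuple with createTuple.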
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  MVT VT = Node->getSimpleValueType(0);

  switch (Opcode) {
  case ISD::Constant: {
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isNullValue()) {
      SDValue New =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    ReplaceNode(Node,
                selectImm(CurDAG, DL, ConstNode->getSExtValue(), *Subtarget));
    return;
  }
  case ISD::FrameIndex: {
    SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
    ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
    return;
  }
  case ISD::SRL: {
    // We don't need this transform if zext.h is supported.
    if (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())
      break;
    // Optimize (srl (and X, 0xffff), C) ->
    //          (srli (slli X, (XLen-16)), (XLen-16) + C)
    // Taking into account that the 0xffff may have had lower bits unset by
    // SimplifyDemandedBits. This avoids materializing the 0xffff immediate.
    // This pattern occurs when type legalizing i16 right shifts.
    // FIXME: This could be extended to other AND masks.
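    // e.g. on RV64, (srl (and X, 0xffff), 3) becomes
    //   slli x, x, 48; srli x, x, 51.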
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (N1C) {
      uint64_t ShAmt = N1C->getZExtValue();
      SDValue N0 = Node->getOperand(0);
      if (ShAmt < 16 && N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
          isa<ConstantSDNode>(N0.getOperand(1))) {
        uint64_t Mask = N0.getConstantOperandVal(1);
        Mask |= maskTrailingOnes<uint64_t>(ShAmt);
        if (Mask == 0xffff) {
          unsigned LShAmt = Subtarget->getXLen() - 16;
          SDNode *SLLI =
              CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                                     CurDAG->getTargetConstant(LShAmt, DL, VT));
          SDNode *SRLI = CurDAG->getMachineNode(
              RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
              CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    break;
  }
  case ISD::AND: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    SDValue N0 = Node->getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if (!LeftShift && N0.getOpcode() != ISD::SRL)
      break;

    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C)
      break;
    uint64_t C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    if (!C2 || C2 >= XLen)
      break;

    uint64_t C1 = N1C->getZExtValue();
    // Keep track of whether this is an andi, zext.h, or zext.w.
    bool ZExtOrANDI = isInt<12>(N1C->getSExtValue());
    if (C1 == UINT64_C(0xFFFF) &&
        (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp()))
      ZExtOrANDI = true;
    if (C1 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba())
      ZExtOrANDI = true;

    // Clear irrelevant bits in the mask.
    if (LeftShift)
      C1 &= maskTrailingZeros<uint64_t>(C2);
    else
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32).
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    SDValue X = N0.getOperand(0);

    // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
    // with c3 leading zeros.
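    // e.g. on RV64, (and (srl x, 2), 0x3fffffff) has c3 == 34 == c2 + 32 and
    // can be selected as srliw x, x, 2.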
    if (!LeftShift && isMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
      if (C2 < C3) {
        // If the number of leading zeros is C2+32 this can be SRLIW.
        if (C2 + 32 == C3) {
          SDNode *SRLIW =
              CurDAG->getMachineNode(RISCV::SRLIW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
        // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
        //
        // This pattern occurs when (i32 (srl (sra Y, 31), c3 - 32)) is type
        // legalized and goes through DAG combine.
        SDValue Y;
        if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
            selectSExti32(X, Y)) {
          SDNode *SRAIW =
              CurDAG->getMachineNode(RISCV::SRAIW, DL, XLenVT, Y,
                                     CurDAG->getTargetConstant(31, DL, XLenVT));
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, XLenVT, SDValue(SRAIW, 0),
              CurDAG->getTargetConstant(C3 - 32, DL, XLenVT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (srli (slli x, c3-c2), c3).
        if (OneUseOrZExtW && !ZExtOrANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C3 - C2, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shl x, c2) c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
    // shifted by c2 bits with c3 leading zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      uint64_t C3 = XLen - (64 - countLeadingZeros(C1));

      if (C2 + C3 < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
        // Use slli.uw when possible.
        if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
          SDNode *SLLIUW =
              CurDAG->getMachineNode(RISCV::SLLIUW, DL, XLenVT, X,
                                     CurDAG->getTargetConstant(C2, DL, XLenVT));
          ReplaceNode(Node, SLLIUW);
          return;
        }

        // (srli (slli x, c2+c3), c3)
        if (OneUseOrZExtW && !ZExtOrANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, XLenVT, X,
              CurDAG->getTargetConstant(C2 + C3, DL, XLenVT));
          SDNode *SRLI =
              CurDAG->getMachineNode(RISCV::SRLI, DL, XLenVT, SDValue(SLLI, 0),
                                     CurDAG->getTargetConstant(C3, DL, XLenVT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16)
          break;
      }
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMNANDOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
        break;
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

      // Expand to
      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16)
          break;
      }
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
                                     : RISCV::PseudoVMSLT_VX_MF8_MASK;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
                                     : RISCV::PseudoVMSLT_VX_MF4_MASK;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
                                     : RISCV::PseudoVMSLT_VX_MF2_MASK;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
                                     : RISCV::PseudoVMSLT_VX_M1_MASK;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
                                     : RISCV::PseudoVMSLT_VX_M2_MASK;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
                                     : RISCV::PseudoVMSLT_VX_M4_MASK;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMSLTOpcode =
            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
                                     : RISCV::PseudoVMSLT_VX_M8_MASK;
        break;
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
      case RISCVII::VLMUL::LMUL_F8:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
        break;
      case RISCVII::VLMUL::LMUL_F4:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
        break;
      case RISCVII::VLMUL::LMUL_F2:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
        break;
      case RISCVII::VLMUL::LMUL_1:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
        break;
      case RISCVII::VLMUL::LMUL_2:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
        break;
      case RISCVII::VLMUL::LMUL_4:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
        break;
      case RISCVII::VLMUL::LMUL_8:
        VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
        break;
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);
      // If the MaskedOff value and the Mask are the same value, use
      // vmslt{u}.vx vt, va, x;  vmandnot.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;

    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax: {
      if (!Subtarget->hasStdExtV())
        break;

      bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
      unsigned Offset = VLMax ? 2 : 3;

      assert(Node->getNumOperands() == Offset + 2 &&
             "Unexpected number of operands");

      unsigned SEW =
          RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
      RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
          Node->getConstantOperandVal(Offset + 1) & 0x7);

      unsigned VTypeI = RISCVVType::encodeVTYPE(
          VLMul, SEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
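      // e.g. SEW=32 (vsew=2) and LMUL=2 (vlmul=1) with tail-agnostic encode
      // as vtype 0x51 (illustrative).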
      SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

      SDValue VLOperand;
      if (VLMax) {
        VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
      } else {
        VLOperand = Node->getOperand(2);

        if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
          uint64_t AVL = C->getZExtValue();
          if (isUInt<5>(AVL)) {
            SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
            ReplaceNode(
                Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, XLenVT,
                                             MVT::Other, VLImm, VTypeIOp,
                                             /* Chain */ Node->getOperand(0)));
            return;
          }
        }
      }

      ReplaceNode(Node,
                  CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
                                         MVT::Other, VLOperand, VTypeIOp,
                                         /* Chain */ Node->getOperand(0)));
      return;
    }
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
    case Intrinsic::riscv_vlsseg7_mask:
    case Intrinsic::riscv_vlsseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vloxseg2:
    case Intrinsic::riscv_vloxseg3:
    case Intrinsic::riscv_vloxseg4:
    case Intrinsic::riscv_vloxseg5:
    case Intrinsic::riscv_vloxseg6:
    case Intrinsic::riscv_vloxseg7:
    case Intrinsic::riscv_vloxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2:
    case Intrinsic::riscv_vluxseg3:
    case Intrinsic::riscv_vluxseg4:
    case Intrinsic::riscv_vluxseg5:
    case Intrinsic::riscv_vluxseg6:
    case Intrinsic::riscv_vluxseg7:
    case Intrinsic::riscv_vluxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vloxseg2_mask:
    case Intrinsic::riscv_vloxseg3_mask:
    case Intrinsic::riscv_vloxseg4_mask:
    case Intrinsic::riscv_vloxseg5_mask:
    case Intrinsic::riscv_vloxseg6_mask:
    case Intrinsic::riscv_vloxseg7_mask:
    case Intrinsic::riscv_vloxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2_mask:
    case Intrinsic::riscv_vluxseg3_mask:
    case Intrinsic::riscv_vluxseg4_mask:
    case Intrinsic::riscv_vluxseg5_mask:
    case Intrinsic::riscv_vluxseg6_mask:
    case Intrinsic::riscv_vluxseg7_mask:
    case Intrinsic::riscv_vluxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vlseg8ff:
    case Intrinsic::riscv_vlseg7ff:
    case Intrinsic::riscv_vlseg6ff:
    case Intrinsic::riscv_vlseg5ff:
    case Intrinsic::riscv_vlseg4ff:
    case Intrinsic::riscv_vlseg3ff:
    case Intrinsic::riscv_vlseg2ff: {
      selectVLSEGFF(Node, /*IsMasked*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg8ff_mask:
    case Intrinsic::riscv_vlseg7ff_mask:
    case Intrinsic::riscv_vlseg6ff_mask:
    case Intrinsic::riscv_vlseg5ff_mask:
    case Intrinsic::riscv_vlseg4ff_mask:
    case Intrinsic::riscv_vlseg3ff_mask:
    case Intrinsic::riscv_vlseg2ff_mask: {
      selectVLSEGFF(Node, /*IsMasked*/ true);
      return;
    }
    case Intrinsic::riscv_vloxei:
    case Intrinsic::riscv_vloxei_mask:
    case Intrinsic::riscv_vluxei:
    case Intrinsic::riscv_vluxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                      IntNo == Intrinsic::riscv_vluxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                       IntNo == Intrinsic::riscv_vloxei_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      if (IsMasked)
        Operands.push_back(Node->getOperand(CurOp++));

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
          IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vle1:
    case Intrinsic::riscv_vle:
    case Intrinsic::riscv_vle_mask:
    case Intrinsic::riscv_vlse:
    case Intrinsic::riscv_vlse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                      IntNo == Intrinsic::riscv_vlse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      if (IsMasked)
        Operands.push_back(Node->getOperand(CurOp++));

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                              static_cast<unsigned>(LMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vleff:
    case Intrinsic::riscv_vleff_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 7> Operands;
      if (IsMasked)
        Operands.push_back(Node->getOperand(CurOp++));

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ false, Operands);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW,
                              static_cast<unsigned>(LMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
                                 MVT::Other, MVT::Glue, Operands);
      SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
                                              /*Glue*/ SDValue(Load, 2));

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
      ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
      ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    case Intrinsic::riscv_vsseg2:
    case Intrinsic::riscv_vsseg3:
    case Intrinsic::riscv_vsseg4:
    case Intrinsic::riscv_vsseg5:
    case Intrinsic::riscv_vsseg6:
    case Intrinsic::riscv_vsseg7:
    case Intrinsic::riscv_vsseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vsseg2_mask:
    case Intrinsic::riscv_vsseg3_mask:
    case Intrinsic::riscv_vsseg4_mask:
    case Intrinsic::riscv_vsseg5_mask:
    case Intrinsic::riscv_vsseg6_mask:
    case Intrinsic::riscv_vsseg7_mask:
    case Intrinsic::riscv_vsseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vssseg2:
    case Intrinsic::riscv_vssseg3:
    case Intrinsic::riscv_vssseg4:
    case Intrinsic::riscv_vssseg5:
    case Intrinsic::riscv_vssseg6:
    case Intrinsic::riscv_vssseg7:
    case Intrinsic::riscv_vssseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vssseg2_mask:
    case Intrinsic::riscv_vssseg3_mask:
    case Intrinsic::riscv_vssseg4_mask:
    case Intrinsic::riscv_vssseg5_mask:
    case Intrinsic::riscv_vssseg6_mask:
    case Intrinsic::riscv_vssseg7_mask:
    case Intrinsic::riscv_vssseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vsoxseg2:
    case Intrinsic::riscv_vsoxseg3:
    case Intrinsic::riscv_vsoxseg4:
    case Intrinsic::riscv_vsoxseg5:
    case Intrinsic::riscv_vsoxseg6:
    case Intrinsic::riscv_vsoxseg7:
    case Intrinsic::riscv_vsoxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2:
    case Intrinsic::riscv_vsuxseg3:
    case Intrinsic::riscv_vsuxseg4:
    case Intrinsic::riscv_vsuxseg5:
    case Intrinsic::riscv_vsuxseg6:
    case Intrinsic::riscv_vsuxseg7:
    case Intrinsic::riscv_vsuxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxseg2_mask:
    case Intrinsic::riscv_vsoxseg3_mask:
    case Intrinsic::riscv_vsoxseg4_mask:
    case Intrinsic::riscv_vsoxseg5_mask:
    case Intrinsic::riscv_vsoxseg6_mask:
    case Intrinsic::riscv_vsoxseg7_mask:
    case Intrinsic::riscv_vsoxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2_mask:
    case Intrinsic::riscv_vsuxseg3_mask:
    case Intrinsic::riscv_vsuxseg4_mask:
    case Intrinsic::riscv_vsuxseg5_mask:
    case Intrinsic::riscv_vsuxseg6_mask:
    case Intrinsic::riscv_vsuxseg7_mask:
    case Intrinsic::riscv_vsuxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxei:
    case Intrinsic::riscv_vsoxei_mask:
    case Intrinsic::riscv_vsuxei:
    case Intrinsic::riscv_vsuxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                      IntNo == Intrinsic::riscv_vsuxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                       IntNo == Intrinsic::riscv_vsoxei_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
          IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    case Intrinsic::riscv_vse1:
    case Intrinsic::riscv_vse:
    case Intrinsic::riscv_vse_mask:
    case Intrinsic::riscv_vsse:
    case Intrinsic::riscv_vsse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                      IntNo == Intrinsic::riscv_vsse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
          IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    }
    break;
  }
  case ISD::BITCAST: {
    MVT SrcVT = Node->getOperand(0).getSimpleValueType();
    // Just drop bitcasts between vectors if both are fixed or both are
    // scalable.
    if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
        (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
      ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue V = Node->getOperand(0);
    SDValue SubV = Node->getOperand(1);
    SDLoc DL(SubV);
    auto Idx = Node->getConstantOperandVal(2);
    MVT SubVecVT = SubV.getSimpleValueType();

    const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
    MVT SubVecContainerVT = SubVecVT;
    // Establish the correct scalable-vector types for any fixed-length type.
    if (SubVecVT.isFixedLengthVector())
      SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
    if (VT.isFixedLengthVector())
      VT = TLI.getContainerForFixedLengthVector(VT);

    const auto *TRI = Subtarget->getRegisterInfo();
    unsigned SubRegIdx;
    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            VT, SubVecContainerVT, Idx, TRI);

    // If the Idx hasn't been completely eliminated then this is a subvector
    // insert which doesn't naturally align to a vector register. These must
    // be handled using instructions to manipulate the vector registers.
    if (Idx != 0)
      break;

    RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
    bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
                           SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
                           SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
    (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
    assert((!IsSubVecPartReg || V.isUndef()) &&
           "Expecting lowering to have created legal INSERT_SUBVECTORs when "
           "the subvector is smaller than a full-sized register");

    // If we haven't set a SubRegIdx, then we must be going between
    // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
    if (SubRegIdx == RISCV::NoSubRegister) {
      unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
      assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
                 InRegClassID &&
             "Unexpected subvector insertion");
1300       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1301       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1302                                                DL, VT, SubV, RC);
1303       ReplaceNode(Node, NewNode);
1304       return;
1305     }
1306 
1307     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1308     ReplaceNode(Node, Insert.getNode());
1309     return;
1310   }
1311   case ISD::EXTRACT_SUBVECTOR: {
1312     SDValue V = Node->getOperand(0);
1313     auto Idx = Node->getConstantOperandVal(1);
1314     MVT InVT = V.getSimpleValueType();
1315     SDLoc DL(V);
1316 
1317     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1318     MVT SubVecContainerVT = VT;
1319     // Establish the correct scalable-vector types for any fixed-length type.
1320     if (VT.isFixedLengthVector())
1321       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1322     if (InVT.isFixedLengthVector())
1323       InVT = TLI.getContainerForFixedLengthVector(InVT);
1324 
1325     const auto *TRI = Subtarget->getRegisterInfo();
1326     unsigned SubRegIdx;
1327     std::tie(SubRegIdx, Idx) =
1328         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1329             InVT, SubVecContainerVT, Idx, TRI);
1330 
1331     // If the Idx hasn't been completely eliminated then this is a subvector
1332     // extract which doesn't naturally align to a vector register. These must
1333     // be handled using instructions to manipulate the vector registers.
1334     if (Idx != 0)
1335       break;
1336 
1337     // If we haven't set a SubRegIdx, then we must be going between
1338     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1339     if (SubRegIdx == RISCV::NoSubRegister) {
1340       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1341       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1342                  InRegClassID &&
1343              "Unexpected subvector extraction");
1344       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1345       SDNode *NewNode =
1346           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1347       ReplaceNode(Node, NewNode);
1348       return;
1349     }
1350 
1351     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1352     ReplaceNode(Node, Extract.getNode());
1353     return;
1354   }
1355   case RISCVISD::VMV_V_X_VL:
1356   case RISCVISD::VFMV_V_F_VL: {
1357     // Try to match a splat of a scalar load to a strided load with stride of x0.
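    // For example (illustrative), a VMV_V_X_VL whose scalar operand is a
    // 32-bit load can be selected as "vlse32.v vd, (addr), zero": every
    // element is read from the same address, and the RVV spec permits the
    // implementation to perform as few as one memory access.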
1358     SDValue Src = Node->getOperand(0);
1359     auto *Ld = dyn_cast<LoadSDNode>(Src);
1360     if (!Ld)
1361       break;
1362     EVT MemVT = Ld->getMemoryVT();
1363     // The memory VT should be the same size as the element type.
1364     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1365       break;
1366     if (!IsProfitableToFold(Src, Node, Node) ||
1367         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1368       break;
1369 
1370     SDValue VL;
1371     selectVLOp(Node->getOperand(1), VL);
1372 
1373     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1374     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1375 
1376     SDValue Operands[] = {Ld->getBasePtr(),
1377                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1378                           Ld->getChain()};
1379 
1380     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1381     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
1382         /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW,
1383         static_cast<unsigned>(LMUL));
1384     MachineSDNode *Load =
1385         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1386 
1387     // Attach the scalar load's memory operand (Node itself is not a MemSDNode).
1388     CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1389 
1390     ReplaceNode(Node, Load);
1391     return;
1392   }
1393   }
1394 
1395   // Select the default instruction.
1396   SelectCode(Node);
1397 }
1398 
1399 bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
1400     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
1401   switch (ConstraintID) {
1402   case InlineAsm::Constraint_m:
1403     // We only support simple memory operands that have a single address
1404     // operand and need no special handling.
1405     OutOps.push_back(Op);
1406     return false;
1407   case InlineAsm::Constraint_A:
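    // The 'A' constraint denotes an address held in a general-purpose
    // register, as used by inline asm for the atomic (LR/SC and AMO)
    // instructions; it also needs no special handling here.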
1408     OutOps.push_back(Op);
1409     return false;
1410   default:
1411     break;
1412   }
1413 
1414   return true;
1415 }
1416 
1417 bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
1418   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
1419     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1420     return true;
1421   }
1422   return false;
1423 }
1424 
1425 bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1426   // If this is a FrameIndex, select it directly. Otherwise just let it get
1427   // selected to a register independently.
1428   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1429     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1430   else
1431     Base = Addr;
1432   return true;
1433 }
1434 
1435 bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1436                                         SDValue &ShAmt) {
1437   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1438   // amount. If there is an AND on the shift amount, we can bypass it if it
1439   // doesn't affect any of those bits.
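  // For example (illustrative), with ShiftWidth == 32 the shift amount in
  // (srlw X, (and Y, 31)) can be selected as Y directly, because the AND
  // preserves exactly the bits the shift reads.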
1440   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1441     const APInt &AndMask = N->getConstantOperandAPInt(1);
1442 
1443     // Since the max shift amount is a power of 2, we can subtract 1 to make a
1444     // mask that covers the bits needed to represent all shift amounts.
1445     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1446     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1447 
1448     if (ShMask.isSubsetOf(AndMask)) {
1449       ShAmt = N.getOperand(0);
1450       return true;
1451     }
1452 
1453     // SimplifyDemandedBits may have optimized the mask, so try restoring any
1454     // bits that are known zero.
1455     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1456     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1457       ShAmt = N.getOperand(0);
1458       return true;
1459     }
1460   }
1461 
1462   ShAmt = N;
1463   return true;
1464 }
1465 
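// Match a value that is already sign-extended from bit 31: either an explicit
// (sext_inreg X, i32), or a value with enough known sign bits. This lets
// callers avoid emitting a redundant sign extension (sext.w).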
1466 bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
1467   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
1468       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
1469     Val = N.getOperand(0);
1470     return true;
1471   }
1472   MVT VT = N.getSimpleValueType();
1473   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
1474     Val = N;
1475     return true;
1476   }
1477 
1478   return false;
1479 }
1480 
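// Match a value whose bits above bit 31 are known to be zero: either an
// explicit (and X, 0xffffffff) mask, or a value proven zero-extended by
// known-bits analysis.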
1481 bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
1482   if (N.getOpcode() == ISD::AND) {
1483     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
1484     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
1485       Val = N.getOperand(0);
1486       return true;
1487     }
1488   }
1489   MVT VT = N.getSimpleValueType();
1490   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
1491   if (CurDAG->MaskedValueIsZero(N, Mask)) {
1492     Val = N;
1493     return true;
1494   }
1495 
1496   return false;
1497 }
1498 
1499 // Select VL as a 5-bit immediate or a value that will become a register. This
1500 // allows us to choose between VSETIVLI and VSETVLI later.
1501 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
1502   auto *C = dyn_cast<ConstantSDNode>(N);
1503   if (C && isUInt<5>(C->getZExtValue()))
1504     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
1505                                    N->getValueType(0));
1506   else
1507     VL = N;
1508 
1509   return true;
1510 }
1511 
1512 bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
1513   if (N.getOpcode() != ISD::SPLAT_VECTOR &&
1514       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1515       N.getOpcode() != RISCVISD::VMV_V_X_VL)
1516     return false;
1517   SplatVal = N.getOperand(0);
1518   return true;
1519 }
1520 
1521 using ValidateFn = bool (*)(int64_t);
1522 
1523 static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
1524                                    SelectionDAG &DAG,
1525                                    const RISCVSubtarget &Subtarget,
1526                                    ValidateFn ValidateImm) {
1527   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1528        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1529        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1530       !isa<ConstantSDNode>(N.getOperand(0)))
1531     return false;
1532 
1533   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1534 
1535   // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
1536   // share semantics when the operand type is wider than the resulting vector
1537   // element type: an implicit truncation first takes place. Therefore, perform
1538   // a manual truncation/sign-extension in order to ignore any truncated bits
1539   // and catch any zero-extended immediate.
1540   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
1541   // sign-extending to (XLenVT -1).
1542   MVT XLenVT = Subtarget.getXLenVT();
1543   assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
1544          "Unexpected splat operand type");
1545   MVT EltVT = N.getSimpleValueType().getVectorElementType();
1546   if (EltVT.bitsLT(XLenVT))
1547     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
1548 
1549   if (!ValidateImm(SplatImm))
1550     return false;
1551 
1552   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
1553   return true;
1554 }
1555 
1556 bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
1557   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
1558                                 [](int64_t Imm) { return isInt<5>(Imm); });
1559 }
1560 
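// Match a splat of a constant in [-15, 16], i.e. simm5 plus one. The callers
// are assumed to encode Imm - 1 in the selected instruction, which is then a
// legal simm5 again.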
1561 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
1562   return selectVSplatSimmHelper(
1563       N, SplatVal, *CurDAG, *Subtarget,
1564       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
1565 }
1566 
1567 bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
1568                                                       SDValue &SplatVal) {
1569   return selectVSplatSimmHelper(
1570       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
1571         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
1572       });
1573 }
1574 
1575 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
1576   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
1577        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
1578        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
1579       !isa<ConstantSDNode>(N.getOperand(0)))
1580     return false;
1581 
1582   int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
1583 
1584   if (!isUInt<5>(SplatImm))
1585     return false;
1586 
1587   SplatVal =
1588       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
1589 
1590   return true;
1591 }
1592 
1593 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
1594                                        SDValue &Imm) {
1595   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
1596     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
1597 
1598     if (!isInt<5>(ImmVal))
1599       return false;
1600 
1601     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
1602     return true;
1603   }
1604 
1605   return false;
1606 }
1607 
1608 // Merge an ADDI into the offset of a load/store instruction where possible.
1609 // (load (addi base, off1), off2) -> (load base, off1+off2)
1610 // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
1611 // This is possible when off1+off2 fits a 12-bit immediate.
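// For example (illustrative, in assembly terms):
//   addi a1, a0, 16
//   lw   a2, 4(a1)
// folds to
//   lw   a2, 20(a0)
// and the ADDI is deleted afterwards if it has no remaining uses.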
1612 void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
1613   SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
1614   ++Position;
1615 
1616   while (Position != CurDAG->allnodes_begin()) {
1617     SDNode *N = &*--Position;
1618     // Skip dead nodes and any non-machine opcodes.
1619     if (N->use_empty() || !N->isMachineOpcode())
1620       continue;
1621 
1622     int OffsetOpIdx;
1623     int BaseOpIdx;
1624 
1625     // Only attempt this optimisation for I-type loads and S-type stores.
1626     switch (N->getMachineOpcode()) {
1627     default:
1628       continue;
1629     case RISCV::LB:
1630     case RISCV::LH:
1631     case RISCV::LW:
1632     case RISCV::LBU:
1633     case RISCV::LHU:
1634     case RISCV::LWU:
1635     case RISCV::LD:
1636     case RISCV::FLH:
1637     case RISCV::FLW:
1638     case RISCV::FLD:
1639       BaseOpIdx = 0;
1640       OffsetOpIdx = 1;
1641       break;
1642     case RISCV::SB:
1643     case RISCV::SH:
1644     case RISCV::SW:
1645     case RISCV::SD:
1646     case RISCV::FSH:
1647     case RISCV::FSW:
1648     case RISCV::FSD:
1649       BaseOpIdx = 1;
1650       OffsetOpIdx = 2;
1651       break;
1652     }
1653 
1654     if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
1655       continue;
1656 
1657     SDValue Base = N->getOperand(BaseOpIdx);
1658 
1659     // If the base is an ADDI, we can merge it into the load/store.
1660     if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
1661       continue;
1662 
1663     SDValue ImmOperand = Base.getOperand(1);
1664     uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
1665 
1666     if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
1667       int64_t Offset1 = Const->getSExtValue();
1668       int64_t CombinedOffset = Offset1 + Offset2;
1669       if (!isInt<12>(CombinedOffset))
1670         continue;
1671       ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
1672                                              ImmOperand.getValueType());
1673     } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
1674       // If the off1 in (addi base, off1) is a global variable's address (its
1675       // low part, really), then we can rely on the alignment of that variable
1676       // to provide a margin of safety before off1 can overflow the 12 bits.
1677       // Check if off2 falls within that margin; if so, off1+off2 can't overflow.
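      // Illustrative example: if the global is 8-byte aligned, %lo(g) is a
      // multiple of 8, so adding any off2 in [1, 7] cannot carry out of the
      // low 12 bits and invalidate the corresponding %hi relocation.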
1678       const DataLayout &DL = CurDAG->getDataLayout();
1679       Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
1680       if (Offset2 != 0 && Alignment <= Offset2)
1681         continue;
1682       int64_t Offset1 = GA->getOffset();
1683       int64_t CombinedOffset = Offset1 + Offset2;
1684       ImmOperand = CurDAG->getTargetGlobalAddress(
1685           GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
1686           CombinedOffset, GA->getTargetFlags());
1687     } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
1688       // Ditto: rely on the constant pool entry's alignment for the margin.
1689       Align Alignment = CP->getAlign();
1690       if (Offset2 != 0 && Alignment <= Offset2)
1691         continue;
1692       int64_t Offset1 = CP->getOffset();
1693       int64_t CombinedOffset = Offset1 + Offset2;
1694       ImmOperand = CurDAG->getTargetConstantPool(
1695           CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
1696           CombinedOffset, CP->getTargetFlags());
1697     } else {
1698       continue;
1699     }
1700 
1701     LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
1702     LLVM_DEBUG(Base->dump(CurDAG));
1703     LLVM_DEBUG(dbgs() << "\nN: ");
1704     LLVM_DEBUG(N->dump(CurDAG));
1705     LLVM_DEBUG(dbgs() << "\n");
1706 
1707     // Modify the offset operand of the load/store.
1708     if (BaseOpIdx == 0) // Load
1709       CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
1710                                  N->getOperand(2));
1711     else // Store
1712       CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
1713                                  ImmOperand, N->getOperand(3));
1714 
1715     // The add-immediate may now be dead, in which case remove it.
1716     if (Base.getNode()->use_empty())
1717       CurDAG->RemoveDeadNode(Base.getNode());
1718   }
1719 }
1720 
1721 // This pass converts a legalized DAG into a RISCV-specific DAG, ready
1722 // for instruction scheduling.
1723 FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
1724   return new RISCVDAGToDAGISel(TM);
1725 }
1726