xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (revision 81ad626541db97eb356e2c1d4a20eb2a26a766ab)
10b57cec5SDimitry Andric //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric // This file defines an instruction selector for the RISCV target.
100b57cec5SDimitry Andric //
110b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
120b57cec5SDimitry Andric 
135ffd83dbSDimitry Andric #include "RISCVISelDAGToDAG.h"
140b57cec5SDimitry Andric #include "MCTargetDesc/RISCVMCTargetDesc.h"
15e8d8bef9SDimitry Andric #include "MCTargetDesc/RISCVMatInt.h"
16fe6060f1SDimitry Andric #include "RISCVISelLowering.h"
17fe6060f1SDimitry Andric #include "RISCVMachineFunctionInfo.h"
180b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFrameInfo.h"
19e8d8bef9SDimitry Andric #include "llvm/IR/IntrinsicsRISCV.h"
205ffd83dbSDimitry Andric #include "llvm/Support/Alignment.h"
210b57cec5SDimitry Andric #include "llvm/Support/Debug.h"
22fe6060f1SDimitry Andric #include "llvm/Support/KnownBits.h"
230b57cec5SDimitry Andric #include "llvm/Support/MathExtras.h"
240b57cec5SDimitry Andric #include "llvm/Support/raw_ostream.h"
255ffd83dbSDimitry Andric 
260b57cec5SDimitry Andric using namespace llvm;
270b57cec5SDimitry Andric 
280b57cec5SDimitry Andric #define DEBUG_TYPE "riscv-isel"
290b57cec5SDimitry Andric 
30fe6060f1SDimitry Andric namespace llvm {
31fe6060f1SDimitry Andric namespace RISCV {
32fe6060f1SDimitry Andric #define GET_RISCVVSSEGTable_IMPL
33fe6060f1SDimitry Andric #define GET_RISCVVLSEGTable_IMPL
34fe6060f1SDimitry Andric #define GET_RISCVVLXSEGTable_IMPL
35fe6060f1SDimitry Andric #define GET_RISCVVSXSEGTable_IMPL
36fe6060f1SDimitry Andric #define GET_RISCVVLETable_IMPL
37fe6060f1SDimitry Andric #define GET_RISCVVSETable_IMPL
38fe6060f1SDimitry Andric #define GET_RISCVVLXTable_IMPL
39fe6060f1SDimitry Andric #define GET_RISCVVSXTable_IMPL
40*81ad6265SDimitry Andric #define GET_RISCVMaskedPseudosTable_IMPL
41fe6060f1SDimitry Andric #include "RISCVGenSearchableTables.inc"
42fe6060f1SDimitry Andric } // namespace RISCV
43fe6060f1SDimitry Andric } // namespace llvm
44fe6060f1SDimitry Andric 
// Pre-isel DAG canonicalization. Two rewrites are performed:
//  1. Generic ISD::SPLAT_VECTOR nodes are converted to the RISCV-specific
//     VL-carrying forms (VMV_V_X_VL / VFMV_V_F_VL) so isel patterns only
//     need to match one representation.
//  2. SPLAT_VECTOR_SPLIT_I64_VL (an i64 splat built from two i32 halves on
//     RV32) is expanded into two i32 stack stores plus a stride-0 vlse that
//     re-reads the slot as a vector of i64.
void RISCVDAGToDAGISel::PreprocessISelDAG() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
    // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
    if (N->getOpcode() == ISD::SPLAT_VECTOR) {
      MVT VT = N->getSimpleValueType(0);
      unsigned Opc =
          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
      SDLoc DL(N);
      // X0 is used as the VL operand here; presumably this encodes VLMAX —
      // NOTE(review): confirm against the VMV_V_X_VL node definition.
      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
      SDValue Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
                                       N->getOperand(0), VL);

      // Back the iterator up over N before the RAUW (which can CSE/delete
      // nodes), then step forward again so we resume at the next live node.
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }

    // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
    // load. Done after lowering and combining so that we have a chance to
    // optimize this to VMV_V_X_VL when the upper bits aren't needed.
    if (N->getOpcode() != RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL)
      continue;

    // Operands: passthru, lo half, hi half, VL.
    assert(N->getNumOperands() == 4 && "Unexpected number of operands");
    MVT VT = N->getSimpleValueType(0);
    SDValue Passthru = N->getOperand(0);
    SDValue Lo = N->getOperand(1);
    SDValue Hi = N->getOperand(2);
    SDValue VL = N->getOperand(3);
    assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
           Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
           "Unexpected VTs!");
    MachineFunction &MF = CurDAG->getMachineFunction();
    RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
    SDLoc DL(N);

    // We use the same frame index we use for moving two i32s into 64-bit FPR.
    // This is an analogous operation.
    int FI = FuncInfo->getMoveF64FrameIndex(MF);
    MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    SDValue StackSlot =
        CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

    // Store the two i32 halves at offsets 0 and 4 of the 8-byte slot. Both
    // stores hang off the entry node; a TokenFactor below orders the load
    // after both.
    SDValue Chain = CurDAG->getEntryNode();
    Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

    SDValue OffsetSlot =
        CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
    Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                          Align(8));

    Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

    // Re-read the slot as a strided vector load (riscv_vlse). The X0 operand
    // is the stride register, giving the stride-0 load described above.
    SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
    SDValue IntID =
        CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
    SDValue Ops[] = {Chain,
                     IntID,
                     Passthru,
                     StackSlot,
                     CurDAG->getRegister(RISCV::X0, MVT::i64),
                     VL};

    SDValue Result = CurDAG->getMemIntrinsicNode(
        ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8),
        MachineMemOperand::MOLoad);

    // We're about to replace all uses of the SPLAT_VECTOR_SPLIT_I64 with the
    // vlse we created.  This will cause general havok on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
133fe6060f1SDimitry Andric 
// Post-isel peephole driver. Walks all nodes bottom-up (end to begin) and
// applies three machine-node peepholes; dead nodes are pruned once at the end
// if anything changed.
void RISCVDAGToDAGISel::PostprocessISelDAG() {
  // The peepholes may replace-all-uses of nodes, which can retarget the DAG
  // root. The HandleSDNode keeps a stable reference so the (possibly new)
  // root can be reinstalled afterwards.
  HandleSDNode Dummy(CurDAG->getRoot());
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    MadeChange |= doPeepholeSExtW(N);
    MadeChange |= doPeepholeLoadStoreADDI(N);
    MadeChange |= doPeepholeMaskedRVV(N);
  }

  CurDAG->setRoot(Dummy.getValue());

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}
1550b57cec5SDimitry Andric 
156*81ad6265SDimitry Andric // Returns true if N is a MachineSDNode that has a reg and simm12 memory
157*81ad6265SDimitry Andric // operand. The indices of the base pointer and offset are returned in BaseOpIdx
158*81ad6265SDimitry Andric // and OffsetOpIdx.
159*81ad6265SDimitry Andric static bool hasMemOffset(SDNode *N, unsigned &BaseOpIdx,
160*81ad6265SDimitry Andric                          unsigned &OffsetOpIdx) {
161*81ad6265SDimitry Andric   switch (N->getMachineOpcode()) {
162*81ad6265SDimitry Andric   case RISCV::LB:
163*81ad6265SDimitry Andric   case RISCV::LH:
164*81ad6265SDimitry Andric   case RISCV::LW:
165*81ad6265SDimitry Andric   case RISCV::LBU:
166*81ad6265SDimitry Andric   case RISCV::LHU:
167*81ad6265SDimitry Andric   case RISCV::LWU:
168*81ad6265SDimitry Andric   case RISCV::LD:
169*81ad6265SDimitry Andric   case RISCV::FLH:
170*81ad6265SDimitry Andric   case RISCV::FLW:
171*81ad6265SDimitry Andric   case RISCV::FLD:
172*81ad6265SDimitry Andric     BaseOpIdx = 0;
173*81ad6265SDimitry Andric     OffsetOpIdx = 1;
174*81ad6265SDimitry Andric     return true;
175*81ad6265SDimitry Andric   case RISCV::SB:
176*81ad6265SDimitry Andric   case RISCV::SH:
177*81ad6265SDimitry Andric   case RISCV::SW:
178*81ad6265SDimitry Andric   case RISCV::SD:
179*81ad6265SDimitry Andric   case RISCV::FSH:
180*81ad6265SDimitry Andric   case RISCV::FSW:
181*81ad6265SDimitry Andric   case RISCV::FSD:
182*81ad6265SDimitry Andric     BaseOpIdx = 1;
183*81ad6265SDimitry Andric     OffsetOpIdx = 2;
184*81ad6265SDimitry Andric     return true;
18504eeddc0SDimitry Andric   }
18604eeddc0SDimitry Andric 
187*81ad6265SDimitry Andric   return false;
188*81ad6265SDimitry Andric }
1890b57cec5SDimitry Andric 
// Emit the RISCVMatInt instruction sequence Seq as machine nodes, chaining
// each instruction's result into the next instruction's source register.
// Returns the node producing the final materialized value.
static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                            RISCVMatInt::InstSeq &Seq) {
  SDNode *Result = nullptr;
  // The first instruction in the sequence sources from X0 (zero).
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, VT);
    // Each RISCVMatInt instruction describes its operand shape; build the
    // matching machine node.
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      // Immediate-only instruction.
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SDImm);
      break;
    case RISCVMatInt::RegX0:
      // Running result plus a hard-coded X0 second operand.
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, VT));
      break;
    case RISCVMatInt::RegReg:
      // Both operands are the running result.
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SrcReg);
      break;
    case RISCVMatInt::RegImm:
      // Running result plus the instruction's immediate.
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SDImm);
      break;
    }

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}
2180b57cec5SDimitry Andric 
219*81ad6265SDimitry Andric static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
220*81ad6265SDimitry Andric                          int64_t Imm, const RISCVSubtarget &Subtarget) {
221*81ad6265SDimitry Andric   RISCVMatInt::InstSeq Seq =
222*81ad6265SDimitry Andric       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
223*81ad6265SDimitry Andric 
224*81ad6265SDimitry Andric   return selectImmSeq(CurDAG, DL, VT, Seq);
225*81ad6265SDimitry Andric }
226*81ad6265SDimitry Andric 
// Bundle NF vector registers (NF in [2,8]) into a single REG_SEQUENCE tuple
// value of the segment register class matching LMUL. Used to form the merge /
// store-data operand of segment load/store pseudos.
static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                           unsigned NF, RISCVII::VLMUL LMUL) {
  // Tuple register classes indexed by NF - 2.
  static const unsigned M1TupleRegClassIDs[] = {
      RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
      RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
      RISCV::VRN8M1RegClassID};
  static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                                RISCV::VRN3M2RegClassID,
                                                RISCV::VRN4M2RegClassID};

  assert(Regs.size() >= 2 && Regs.size() <= 8);

  unsigned RegClassID;
  unsigned SubReg0;
  switch (LMUL) {
  default:
    llvm_unreachable("Invalid LMUL.");
  // Fractional LMULs use the whole-register (M1) tuple classes.
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    // The loop below relies on the subreg indices being consecutive.
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm1_0;
    RegClassID = M1TupleRegClassIDs[NF - 2];
    break;
  case RISCVII::VLMUL::LMUL_2:
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm2_0;
    RegClassID = M2TupleRegClassIDs[NF - 2];
    break;
  case RISCVII::VLMUL::LMUL_4:
    // LMUL=4 only supports NF=2.
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm4_0;
    RegClassID = RISCV::VRN2M4RegClassID;
    break;
  }

  SDLoc DL(Regs[0]);
  SmallVector<SDValue, 8> Ops;

  // REG_SEQUENCE operands: register class ID, then (value, subreg-index)
  // pairs placing each input register into consecutive subregisters.
  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));

  for (unsigned I = 0; I < Regs.size(); ++I) {
    Ops.push_back(Regs[I]);
    Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
  }
  SDNode *N =
      CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}
280e8d8bef9SDimitry Andric 
// Append the common trailing operands of an RVV load/store pseudo to
// Operands, consuming source operands of Node starting at CurOp. The emitted
// order is: base, [stride/index], [mask (via V0)], VL, SEW, [policy (masked
// loads only)], chain, [glue]. If IndexVT is non-null it receives the type of
// the stride/index operand.
void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  SDValue Base;
  SelectBaseAddr(Node->getOperand(CurOp++), Base);
  Operands.push_back(Base); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    // Glue the V0 copy to the pseudo so the mask register stays live into it.
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  // Masked load has the tail policy argument.
  if (IsMasked && IsLoad) {
    // Policy must be a constant.
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
    Operands.push_back(PolicyOp);
  }

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}
325fe6060f1SDimitry Andric 
326*81ad6265SDimitry Andric static bool isAllUndef(ArrayRef<SDValue> Values) {
327*81ad6265SDimitry Andric   return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
328*81ad6265SDimitry Andric }
329*81ad6265SDimitry Andric 
// Select a (possibly masked / strided) unit or strided segment load intrinsic
// into its VLSEG pseudo. The NF result values are extracted from the pseudo's
// tuple result via subregister copies.
void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // Result values minus the trailing chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  // Operands [2, 2+NF) are the merge values for the destination tuple.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  // Tail-undisturbed form is needed when masked or when any merge value is
  // live (non-undef); only then does the pseudo take a merge operand.
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(Merge);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands, /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Carry over the memory operand for scheduling/alias analysis.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result into the NF individual vector results.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
372e8d8bef9SDimitry Andric 
// Select a fault-only-first segment load intrinsic into its VLSEG..FF pseudo.
// Unlike selectVLSEG, the node produces an extra VL result (the number of
// elements actually loaded) in addition to the NF vectors and the chain.
void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;

  // Operands [2, 2+NF) are the merge values for the destination tuple.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  // Tail-undisturbed form only when masked or some merge value is live.
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands,
                             /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  // Results: tuple, output VL (XLenVT), chain.
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               XLenVT, MVT::Other, Operands);

  // Carry over the memory operand for scheduling/alias analysis.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result into the NF individual vector results.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
  CurDAG->RemoveDeadNode(Node);
}
417fe6060f1SDimitry Andric 
// Select an indexed (ordered or unordered) segment load intrinsic into its
// VLXSEG pseudo. The index vector's EEW/LMUL are derived from its type and
// encoded into the pseudo lookup.
void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  // Result values minus the trailing chain.
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  // Operands [2, 2+NF) are the merge values for the destination tuple.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  // Tail-undisturbed form only when masked or some merge value is live.
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/true, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  // i64 index elements are only legal on RV64.
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  // Carry over the memory operand for scheduling/alias analysis.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  // Split the tuple result into the NF individual vector results.
  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}
471e8d8bef9SDimitry Andric 
// Select a (possibly masked / strided) segment store intrinsic into its VSSEG
// pseudo. The NF source vectors are bundled into one tuple value first.
void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  // Operand layout: chain, intrinsic-id, NF data vectors, base,
  // [stride], [mask], VL — so subtract the non-data operands to get NF.
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  // Bundle the NF data vectors into a single tuple store value.
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  // Carry over the memory operand for scheduling/alias analysis.
  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}
503e8d8bef9SDimitry Andric 
504fe6060f1SDimitry Andric void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
505fe6060f1SDimitry Andric                                      bool IsOrdered) {
506e8d8bef9SDimitry Andric   SDLoc DL(Node);
507e8d8bef9SDimitry Andric   unsigned NF = Node->getNumOperands() - 5;
508fe6060f1SDimitry Andric   if (IsMasked)
509fe6060f1SDimitry Andric     --NF;
510fe6060f1SDimitry Andric   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
511fe6060f1SDimitry Andric   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
512fe6060f1SDimitry Andric   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
513e8d8bef9SDimitry Andric   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
514e8d8bef9SDimitry Andric   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
515e8d8bef9SDimitry Andric 
516fe6060f1SDimitry Andric   SmallVector<SDValue, 8> Operands;
517fe6060f1SDimitry Andric   Operands.push_back(StoreVal);
518fe6060f1SDimitry Andric   unsigned CurOp = 2 + NF;
519fe6060f1SDimitry Andric 
520fe6060f1SDimitry Andric   MVT IndexVT;
521fe6060f1SDimitry Andric   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
522349cc55cSDimitry Andric                              /*IsStridedOrIndexed*/ true, Operands,
523349cc55cSDimitry Andric                              /*IsLoad=*/false, &IndexVT);
524fe6060f1SDimitry Andric 
525fe6060f1SDimitry Andric   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
526fe6060f1SDimitry Andric          "Element count mismatch");
527fe6060f1SDimitry Andric 
528fe6060f1SDimitry Andric   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
529fe6060f1SDimitry Andric   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
53004eeddc0SDimitry Andric   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
53104eeddc0SDimitry Andric     report_fatal_error("The V extension does not support EEW=64 for index "
53204eeddc0SDimitry Andric                        "values when XLEN=32");
53304eeddc0SDimitry Andric   }
534fe6060f1SDimitry Andric   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
535fe6060f1SDimitry Andric       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
536e8d8bef9SDimitry Andric       static_cast<unsigned>(IndexLMUL));
537fe6060f1SDimitry Andric   MachineSDNode *Store =
538e8d8bef9SDimitry Andric       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
539fe6060f1SDimitry Andric 
540fe6060f1SDimitry Andric   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
541fe6060f1SDimitry Andric     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
542fe6060f1SDimitry Andric 
543e8d8bef9SDimitry Andric   ReplaceNode(Node, Store);
544e8d8bef9SDimitry Andric }
545e8d8bef9SDimitry Andric 
54604eeddc0SDimitry Andric void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
54704eeddc0SDimitry Andric   if (!Subtarget->hasVInstructions())
54804eeddc0SDimitry Andric     return;
54904eeddc0SDimitry Andric 
55004eeddc0SDimitry Andric   assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
55104eeddc0SDimitry Andric           Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
55204eeddc0SDimitry Andric          "Unexpected opcode");
55304eeddc0SDimitry Andric 
55404eeddc0SDimitry Andric   SDLoc DL(Node);
55504eeddc0SDimitry Andric   MVT XLenVT = Subtarget->getXLenVT();
55604eeddc0SDimitry Andric 
55704eeddc0SDimitry Andric   bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
55804eeddc0SDimitry Andric   unsigned IntNoOffset = HasChain ? 1 : 0;
55904eeddc0SDimitry Andric   unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
56004eeddc0SDimitry Andric 
56104eeddc0SDimitry Andric   assert((IntNo == Intrinsic::riscv_vsetvli ||
56204eeddc0SDimitry Andric           IntNo == Intrinsic::riscv_vsetvlimax ||
56304eeddc0SDimitry Andric           IntNo == Intrinsic::riscv_vsetvli_opt ||
56404eeddc0SDimitry Andric           IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
56504eeddc0SDimitry Andric          "Unexpected vsetvli intrinsic");
56604eeddc0SDimitry Andric 
56704eeddc0SDimitry Andric   bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
56804eeddc0SDimitry Andric                IntNo == Intrinsic::riscv_vsetvlimax_opt;
56904eeddc0SDimitry Andric   unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
57004eeddc0SDimitry Andric 
57104eeddc0SDimitry Andric   assert(Node->getNumOperands() == Offset + 2 &&
57204eeddc0SDimitry Andric          "Unexpected number of operands");
57304eeddc0SDimitry Andric 
57404eeddc0SDimitry Andric   unsigned SEW =
57504eeddc0SDimitry Andric       RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
57604eeddc0SDimitry Andric   RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
57704eeddc0SDimitry Andric       Node->getConstantOperandVal(Offset + 1) & 0x7);
57804eeddc0SDimitry Andric 
57904eeddc0SDimitry Andric   unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
58004eeddc0SDimitry Andric                                             /*MaskAgnostic*/ false);
58104eeddc0SDimitry Andric   SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
58204eeddc0SDimitry Andric 
58304eeddc0SDimitry Andric   SmallVector<EVT, 2> VTs = {XLenVT};
58404eeddc0SDimitry Andric   if (HasChain)
58504eeddc0SDimitry Andric     VTs.push_back(MVT::Other);
58604eeddc0SDimitry Andric 
58704eeddc0SDimitry Andric   SDValue VLOperand;
58804eeddc0SDimitry Andric   unsigned Opcode = RISCV::PseudoVSETVLI;
58904eeddc0SDimitry Andric   if (VLMax) {
59004eeddc0SDimitry Andric     VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
59104eeddc0SDimitry Andric     Opcode = RISCV::PseudoVSETVLIX0;
59204eeddc0SDimitry Andric   } else {
59304eeddc0SDimitry Andric     VLOperand = Node->getOperand(IntNoOffset + 1);
59404eeddc0SDimitry Andric 
59504eeddc0SDimitry Andric     if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
59604eeddc0SDimitry Andric       uint64_t AVL = C->getZExtValue();
59704eeddc0SDimitry Andric       if (isUInt<5>(AVL)) {
59804eeddc0SDimitry Andric         SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
59904eeddc0SDimitry Andric         SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
60004eeddc0SDimitry Andric         if (HasChain)
60104eeddc0SDimitry Andric           Ops.push_back(Node->getOperand(0));
60204eeddc0SDimitry Andric         ReplaceNode(
60304eeddc0SDimitry Andric             Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
60404eeddc0SDimitry Andric         return;
60504eeddc0SDimitry Andric       }
60604eeddc0SDimitry Andric     }
60704eeddc0SDimitry Andric   }
60804eeddc0SDimitry Andric 
60904eeddc0SDimitry Andric   SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
61004eeddc0SDimitry Andric   if (HasChain)
61104eeddc0SDimitry Andric     Ops.push_back(Node->getOperand(0));
61204eeddc0SDimitry Andric 
61304eeddc0SDimitry Andric   ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
61404eeddc0SDimitry Andric }
6150b57cec5SDimitry Andric 
6160b57cec5SDimitry Andric void RISCVDAGToDAGISel::Select(SDNode *Node) {
6170b57cec5SDimitry Andric   // If we have a custom node, we have already selected.
6180b57cec5SDimitry Andric   if (Node->isMachineOpcode()) {
6190b57cec5SDimitry Andric     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
6200b57cec5SDimitry Andric     Node->setNodeId(-1);
6210b57cec5SDimitry Andric     return;
6220b57cec5SDimitry Andric   }
6230b57cec5SDimitry Andric 
6240b57cec5SDimitry Andric   // Instruction Selection not handled by the auto-generated tablegen selection
6250b57cec5SDimitry Andric   // should be handled here.
6260b57cec5SDimitry Andric   unsigned Opcode = Node->getOpcode();
6270b57cec5SDimitry Andric   MVT XLenVT = Subtarget->getXLenVT();
6280b57cec5SDimitry Andric   SDLoc DL(Node);
629fe6060f1SDimitry Andric   MVT VT = Node->getSimpleValueType(0);
6300b57cec5SDimitry Andric 
6310b57cec5SDimitry Andric   switch (Opcode) {
6320b57cec5SDimitry Andric   case ISD::Constant: {
633fe6060f1SDimitry Andric     auto *ConstNode = cast<ConstantSDNode>(Node);
634349cc55cSDimitry Andric     if (VT == XLenVT && ConstNode->isZero()) {
635e8d8bef9SDimitry Andric       SDValue New =
636e8d8bef9SDimitry Andric           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
6370b57cec5SDimitry Andric       ReplaceNode(Node, New.getNode());
6380b57cec5SDimitry Andric       return;
6390b57cec5SDimitry Andric     }
640349cc55cSDimitry Andric     int64_t Imm = ConstNode->getSExtValue();
641349cc55cSDimitry Andric     // If the upper XLen-16 bits are not used, try to convert this to a simm12
642349cc55cSDimitry Andric     // by sign extending bit 15.
643*81ad6265SDimitry Andric     if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
644349cc55cSDimitry Andric         hasAllHUsers(Node))
645*81ad6265SDimitry Andric       Imm = SignExtend64<16>(Imm);
646349cc55cSDimitry Andric     // If the upper 32-bits are not used try to convert this into a simm32 by
647349cc55cSDimitry Andric     // sign extending bit 32.
648349cc55cSDimitry Andric     if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
649*81ad6265SDimitry Andric       Imm = SignExtend64<32>(Imm);
650349cc55cSDimitry Andric 
65104eeddc0SDimitry Andric     ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
6520b57cec5SDimitry Andric     return;
6530b57cec5SDimitry Andric   }
654*81ad6265SDimitry Andric   case ISD::ADD: {
655*81ad6265SDimitry Andric     // Try to select ADD + immediate used as memory addresses to
656*81ad6265SDimitry Andric     // (ADDI (ADD X, Imm-Lo12), Lo12) if it will allow the ADDI to be removed by
657*81ad6265SDimitry Andric     // doPeepholeLoadStoreADDI.
658*81ad6265SDimitry Andric 
659*81ad6265SDimitry Andric     // LHS should be an immediate.
660*81ad6265SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
661*81ad6265SDimitry Andric     if (!N1C)
662*81ad6265SDimitry Andric       break;
663*81ad6265SDimitry Andric 
664*81ad6265SDimitry Andric     int64_t Offset = N1C->getSExtValue();
665*81ad6265SDimitry Andric     int64_t Lo12 = SignExtend64<12>(Offset);
666*81ad6265SDimitry Andric 
667*81ad6265SDimitry Andric     // Don't do this if the lower 12 bits are 0 or we could use ADDI directly.
668*81ad6265SDimitry Andric     if (Lo12 == 0 || isInt<12>(Offset))
669*81ad6265SDimitry Andric       break;
670*81ad6265SDimitry Andric 
671*81ad6265SDimitry Andric     // Don't do this if we can use a pair of ADDIs.
672*81ad6265SDimitry Andric     if (isInt<12>(Offset / 2) && isInt<12>(Offset - Offset / 2))
673*81ad6265SDimitry Andric       break;
674*81ad6265SDimitry Andric 
675*81ad6265SDimitry Andric     RISCVMatInt::InstSeq Seq =
676*81ad6265SDimitry Andric         RISCVMatInt::generateInstSeq(Offset, Subtarget->getFeatureBits());
677*81ad6265SDimitry Andric 
678*81ad6265SDimitry Andric     Offset -= Lo12;
679*81ad6265SDimitry Andric     // Restore sign bits for RV32.
680*81ad6265SDimitry Andric     if (!Subtarget->is64Bit())
681*81ad6265SDimitry Andric       Offset = SignExtend64<32>(Offset);
682*81ad6265SDimitry Andric 
683*81ad6265SDimitry Andric     // We can fold if the last operation is an ADDI or its an ADDIW that could
684*81ad6265SDimitry Andric     // be treated as an ADDI.
685*81ad6265SDimitry Andric     if (Seq.back().Opc != RISCV::ADDI &&
686*81ad6265SDimitry Andric         !(Seq.back().Opc == RISCV::ADDIW && isInt<32>(Offset)))
687*81ad6265SDimitry Andric       break;
688*81ad6265SDimitry Andric     assert(Seq.back().Imm == Lo12 && "Expected immediate to match Lo12");
689*81ad6265SDimitry Andric     // Drop the last operation.
690*81ad6265SDimitry Andric     Seq.pop_back();
691*81ad6265SDimitry Andric     assert(!Seq.empty() && "Expected more instructions in sequence");
692*81ad6265SDimitry Andric 
693*81ad6265SDimitry Andric     bool AllPointerUses = true;
694*81ad6265SDimitry Andric     for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
695*81ad6265SDimitry Andric       SDNode *User = *UI;
696*81ad6265SDimitry Andric 
697*81ad6265SDimitry Andric       // Is this user a memory instruction that uses a register and immediate
698*81ad6265SDimitry Andric       // that has this ADD as its pointer.
699*81ad6265SDimitry Andric       unsigned BaseOpIdx, OffsetOpIdx;
700*81ad6265SDimitry Andric       if (!User->isMachineOpcode() ||
701*81ad6265SDimitry Andric           !hasMemOffset(User, BaseOpIdx, OffsetOpIdx) ||
702*81ad6265SDimitry Andric           UI.getOperandNo() != BaseOpIdx) {
703*81ad6265SDimitry Andric         AllPointerUses = false;
704*81ad6265SDimitry Andric         break;
705*81ad6265SDimitry Andric       }
706*81ad6265SDimitry Andric 
707*81ad6265SDimitry Andric       // If the memory instruction already has an offset, make sure the combined
708*81ad6265SDimitry Andric       // offset is foldable.
709*81ad6265SDimitry Andric       int64_t MemOffs =
710*81ad6265SDimitry Andric           cast<ConstantSDNode>(User->getOperand(OffsetOpIdx))->getSExtValue();
711*81ad6265SDimitry Andric       MemOffs += Lo12;
712*81ad6265SDimitry Andric       if (!isInt<12>(MemOffs)) {
713*81ad6265SDimitry Andric         AllPointerUses = false;
714*81ad6265SDimitry Andric         break;
715*81ad6265SDimitry Andric       }
716*81ad6265SDimitry Andric     }
717*81ad6265SDimitry Andric 
718*81ad6265SDimitry Andric     if (!AllPointerUses)
719*81ad6265SDimitry Andric       break;
720*81ad6265SDimitry Andric 
721*81ad6265SDimitry Andric     // Emit (ADDI (ADD X, Hi), Lo)
722*81ad6265SDimitry Andric     SDNode *Imm = selectImmSeq(CurDAG, DL, VT, Seq);
723*81ad6265SDimitry Andric     SDNode *ADD = CurDAG->getMachineNode(RISCV::ADD, DL, VT,
724*81ad6265SDimitry Andric                                          Node->getOperand(0), SDValue(Imm, 0));
725*81ad6265SDimitry Andric     SDNode *ADDI =
726*81ad6265SDimitry Andric         CurDAG->getMachineNode(RISCV::ADDI, DL, VT, SDValue(ADD, 0),
727*81ad6265SDimitry Andric                                CurDAG->getTargetConstant(Lo12, DL, VT));
728*81ad6265SDimitry Andric     ReplaceNode(Node, ADDI);
7290b57cec5SDimitry Andric     return;
7300b57cec5SDimitry Andric   }
731*81ad6265SDimitry Andric   case ISD::SHL: {
732fe6060f1SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
73304eeddc0SDimitry Andric     if (!N1C)
73404eeddc0SDimitry Andric       break;
735fe6060f1SDimitry Andric     SDValue N0 = Node->getOperand(0);
73604eeddc0SDimitry Andric     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
73704eeddc0SDimitry Andric         !isa<ConstantSDNode>(N0.getOperand(1)))
73804eeddc0SDimitry Andric       break;
73904eeddc0SDimitry Andric     unsigned ShAmt = N1C->getZExtValue();
740fe6060f1SDimitry Andric     uint64_t Mask = N0.getConstantOperandVal(1);
741*81ad6265SDimitry Andric 
742*81ad6265SDimitry Andric     // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
743*81ad6265SDimitry Andric     // 32 leading zeros and C3 trailing zeros.
744*81ad6265SDimitry Andric     if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
745*81ad6265SDimitry Andric       unsigned XLen = Subtarget->getXLen();
746*81ad6265SDimitry Andric       unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
747*81ad6265SDimitry Andric       unsigned TrailingZeros = countTrailingZeros(Mask);
748*81ad6265SDimitry Andric       if (TrailingZeros > 0 && LeadingZeros == 32) {
749*81ad6265SDimitry Andric         SDNode *SRLIW = CurDAG->getMachineNode(
750*81ad6265SDimitry Andric             RISCV::SRLIW, DL, VT, N0->getOperand(0),
751*81ad6265SDimitry Andric             CurDAG->getTargetConstant(TrailingZeros, DL, VT));
752*81ad6265SDimitry Andric         SDNode *SLLI = CurDAG->getMachineNode(
753*81ad6265SDimitry Andric             RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
754*81ad6265SDimitry Andric             CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
755*81ad6265SDimitry Andric         ReplaceNode(Node, SLLI);
756*81ad6265SDimitry Andric         return;
757*81ad6265SDimitry Andric       }
758*81ad6265SDimitry Andric     }
759*81ad6265SDimitry Andric     break;
760*81ad6265SDimitry Andric   }
761*81ad6265SDimitry Andric   case ISD::SRL: {
762*81ad6265SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
763*81ad6265SDimitry Andric     if (!N1C)
764*81ad6265SDimitry Andric       break;
765*81ad6265SDimitry Andric     SDValue N0 = Node->getOperand(0);
766*81ad6265SDimitry Andric     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
767*81ad6265SDimitry Andric         !isa<ConstantSDNode>(N0.getOperand(1)))
768*81ad6265SDimitry Andric       break;
769*81ad6265SDimitry Andric     unsigned ShAmt = N1C->getZExtValue();
770*81ad6265SDimitry Andric     uint64_t Mask = N0.getConstantOperandVal(1);
771*81ad6265SDimitry Andric 
772*81ad6265SDimitry Andric     // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
773*81ad6265SDimitry Andric     // 32 leading zeros and C3 trailing zeros.
774*81ad6265SDimitry Andric     if (isShiftedMask_64(Mask)) {
775*81ad6265SDimitry Andric       unsigned XLen = Subtarget->getXLen();
776*81ad6265SDimitry Andric       unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
777*81ad6265SDimitry Andric       unsigned TrailingZeros = countTrailingZeros(Mask);
778*81ad6265SDimitry Andric       if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
779*81ad6265SDimitry Andric         SDNode *SRLIW = CurDAG->getMachineNode(
780*81ad6265SDimitry Andric             RISCV::SRLIW, DL, VT, N0->getOperand(0),
781*81ad6265SDimitry Andric             CurDAG->getTargetConstant(TrailingZeros, DL, VT));
782*81ad6265SDimitry Andric         SDNode *SLLI = CurDAG->getMachineNode(
783*81ad6265SDimitry Andric             RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
784*81ad6265SDimitry Andric             CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
785*81ad6265SDimitry Andric         ReplaceNode(Node, SLLI);
786*81ad6265SDimitry Andric         return;
787*81ad6265SDimitry Andric       }
788*81ad6265SDimitry Andric     }
789*81ad6265SDimitry Andric 
790*81ad6265SDimitry Andric     // Optimize (srl (and X, C2), C) ->
791*81ad6265SDimitry Andric     //          (srli (slli X, (XLen-C3), (XLen-C3) + C)
792*81ad6265SDimitry Andric     // Where C2 is a mask with C3 trailing ones.
793*81ad6265SDimitry Andric     // Taking into account that the C2 may have had lower bits unset by
794*81ad6265SDimitry Andric     // SimplifyDemandedBits. This avoids materializing the C2 immediate.
795*81ad6265SDimitry Andric     // This pattern occurs when type legalizing right shifts for types with
796*81ad6265SDimitry Andric     // less than XLen bits.
797fe6060f1SDimitry Andric     Mask |= maskTrailingOnes<uint64_t>(ShAmt);
79804eeddc0SDimitry Andric     if (!isMask_64(Mask))
79904eeddc0SDimitry Andric       break;
80004eeddc0SDimitry Andric     unsigned TrailingOnes = countTrailingOnes(Mask);
80104eeddc0SDimitry Andric     // 32 trailing ones should use srliw via tablegen pattern.
80204eeddc0SDimitry Andric     if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
80304eeddc0SDimitry Andric       break;
80404eeddc0SDimitry Andric     unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
805fe6060f1SDimitry Andric     SDNode *SLLI =
806fe6060f1SDimitry Andric         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
807fe6060f1SDimitry Andric                                CurDAG->getTargetConstant(LShAmt, DL, VT));
808fe6060f1SDimitry Andric     SDNode *SRLI = CurDAG->getMachineNode(
809fe6060f1SDimitry Andric         RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
810fe6060f1SDimitry Andric         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
811fe6060f1SDimitry Andric     ReplaceNode(Node, SRLI);
812fe6060f1SDimitry Andric     return;
813fe6060f1SDimitry Andric   }
81404eeddc0SDimitry Andric   case ISD::SRA: {
81504eeddc0SDimitry Andric     // Optimize (sra (sext_inreg X, i16), C) ->
81604eeddc0SDimitry Andric     //          (srai (slli X, (XLen-16), (XLen-16) + C)
81704eeddc0SDimitry Andric     // And      (sra (sext_inreg X, i8), C) ->
81804eeddc0SDimitry Andric     //          (srai (slli X, (XLen-8), (XLen-8) + C)
81904eeddc0SDimitry Andric     // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
82004eeddc0SDimitry Andric     // This transform matches the code we get without Zbb. The shifts are more
82104eeddc0SDimitry Andric     // compressible, and this can help expose CSE opportunities in the sdiv by
82204eeddc0SDimitry Andric     // constant optimization.
82304eeddc0SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
82404eeddc0SDimitry Andric     if (!N1C)
825fe6060f1SDimitry Andric       break;
82604eeddc0SDimitry Andric     SDValue N0 = Node->getOperand(0);
82704eeddc0SDimitry Andric     if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
82804eeddc0SDimitry Andric       break;
82904eeddc0SDimitry Andric     unsigned ShAmt = N1C->getZExtValue();
83004eeddc0SDimitry Andric     unsigned ExtSize =
83104eeddc0SDimitry Andric         cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
83204eeddc0SDimitry Andric     // ExtSize of 32 should use sraiw via tablegen pattern.
83304eeddc0SDimitry Andric     if (ExtSize >= 32 || ShAmt >= ExtSize)
83404eeddc0SDimitry Andric       break;
83504eeddc0SDimitry Andric     unsigned LShAmt = Subtarget->getXLen() - ExtSize;
83604eeddc0SDimitry Andric     SDNode *SLLI =
83704eeddc0SDimitry Andric         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
83804eeddc0SDimitry Andric                                CurDAG->getTargetConstant(LShAmt, DL, VT));
83904eeddc0SDimitry Andric     SDNode *SRAI = CurDAG->getMachineNode(
84004eeddc0SDimitry Andric         RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
84104eeddc0SDimitry Andric         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
84204eeddc0SDimitry Andric     ReplaceNode(Node, SRAI);
84304eeddc0SDimitry Andric     return;
844fe6060f1SDimitry Andric   }
845fe6060f1SDimitry Andric   case ISD::AND: {
846fe6060f1SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
847fe6060f1SDimitry Andric     if (!N1C)
848fe6060f1SDimitry Andric       break;
849fe6060f1SDimitry Andric 
850fe6060f1SDimitry Andric     SDValue N0 = Node->getOperand(0);
851fe6060f1SDimitry Andric 
852fe6060f1SDimitry Andric     bool LeftShift = N0.getOpcode() == ISD::SHL;
853fe6060f1SDimitry Andric     if (!LeftShift && N0.getOpcode() != ISD::SRL)
854fe6060f1SDimitry Andric       break;
855fe6060f1SDimitry Andric 
856fe6060f1SDimitry Andric     auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
857fe6060f1SDimitry Andric     if (!C)
858fe6060f1SDimitry Andric       break;
859fe6060f1SDimitry Andric     uint64_t C2 = C->getZExtValue();
860fe6060f1SDimitry Andric     unsigned XLen = Subtarget->getXLen();
861fe6060f1SDimitry Andric     if (!C2 || C2 >= XLen)
862fe6060f1SDimitry Andric       break;
863fe6060f1SDimitry Andric 
864fe6060f1SDimitry Andric     uint64_t C1 = N1C->getZExtValue();
865fe6060f1SDimitry Andric 
866*81ad6265SDimitry Andric     // Keep track of whether this is a c.andi. If we can't use c.andi, the
867*81ad6265SDimitry Andric     // shift pair might offer more compression opportunities.
868*81ad6265SDimitry Andric     // TODO: We could check for C extension here, but we don't have many lit
869*81ad6265SDimitry Andric     // tests with the C extension enabled so not checking gets better coverage.
870*81ad6265SDimitry Andric     // TODO: What if ANDI faster than shift?
871*81ad6265SDimitry Andric     bool IsCANDI = isInt<6>(N1C->getSExtValue());
872fe6060f1SDimitry Andric 
873fe6060f1SDimitry Andric     // Clear irrelevant bits in the mask.
874fe6060f1SDimitry Andric     if (LeftShift)
875fe6060f1SDimitry Andric       C1 &= maskTrailingZeros<uint64_t>(C2);
876fe6060f1SDimitry Andric     else
877fe6060f1SDimitry Andric       C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
878fe6060f1SDimitry Andric 
879fe6060f1SDimitry Andric     // Some transforms should only be done if the shift has a single use or
880fe6060f1SDimitry Andric     // the AND would become (srli (slli X, 32), 32)
881fe6060f1SDimitry Andric     bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
882fe6060f1SDimitry Andric 
883fe6060f1SDimitry Andric     SDValue X = N0.getOperand(0);
884fe6060f1SDimitry Andric 
885fe6060f1SDimitry Andric     // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
886fe6060f1SDimitry Andric     // with c3 leading zeros.
887fe6060f1SDimitry Andric     if (!LeftShift && isMask_64(C1)) {
888fe6060f1SDimitry Andric       uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
889fe6060f1SDimitry Andric       if (C2 < C3) {
890fe6060f1SDimitry Andric         // If the number of leading zeros is C2+32 this can be SRLIW.
891fe6060f1SDimitry Andric         if (C2 + 32 == C3) {
892*81ad6265SDimitry Andric           SDNode *SRLIW = CurDAG->getMachineNode(
893*81ad6265SDimitry Andric               RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
894fe6060f1SDimitry Andric           ReplaceNode(Node, SRLIW);
895fe6060f1SDimitry Andric           return;
896fe6060f1SDimitry Andric         }
897fe6060f1SDimitry Andric 
898fe6060f1SDimitry Andric         // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
899fe6060f1SDimitry Andric         // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
900fe6060f1SDimitry Andric         //
901fe6060f1SDimitry Andric         // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
902fe6060f1SDimitry Andric         // legalized and goes through DAG combine.
903fe6060f1SDimitry Andric         if (C2 >= 32 && (C3 - C2) == 1 && N0.hasOneUse() &&
904*81ad6265SDimitry Andric             X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
905*81ad6265SDimitry Andric             cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
906fe6060f1SDimitry Andric           SDNode *SRAIW =
907*81ad6265SDimitry Andric               CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
908*81ad6265SDimitry Andric                                      CurDAG->getTargetConstant(31, DL, VT));
909fe6060f1SDimitry Andric           SDNode *SRLIW = CurDAG->getMachineNode(
910*81ad6265SDimitry Andric               RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
911*81ad6265SDimitry Andric               CurDAG->getTargetConstant(C3 - 32, DL, VT));
912fe6060f1SDimitry Andric           ReplaceNode(Node, SRLIW);
913fe6060f1SDimitry Andric           return;
914fe6060f1SDimitry Andric         }
915fe6060f1SDimitry Andric 
916fe6060f1SDimitry Andric         // (srli (slli x, c3-c2), c3).
917*81ad6265SDimitry Andric         // Skip if we could use (zext.w (sraiw X, C2)).
918*81ad6265SDimitry Andric         bool Skip = Subtarget->hasStdExtZba() && C3 == 32 &&
919*81ad6265SDimitry Andric                     X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
920*81ad6265SDimitry Andric                     cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
921*81ad6265SDimitry Andric         // Also Skip if we can use bexti.
922*81ad6265SDimitry Andric         Skip |= Subtarget->hasStdExtZbs() && C3 == XLen - 1;
923*81ad6265SDimitry Andric         if (OneUseOrZExtW && !Skip) {
924fe6060f1SDimitry Andric           SDNode *SLLI = CurDAG->getMachineNode(
925*81ad6265SDimitry Andric               RISCV::SLLI, DL, VT, X,
926*81ad6265SDimitry Andric               CurDAG->getTargetConstant(C3 - C2, DL, VT));
927fe6060f1SDimitry Andric           SDNode *SRLI =
928*81ad6265SDimitry Andric               CurDAG->getMachineNode(RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
929*81ad6265SDimitry Andric                                      CurDAG->getTargetConstant(C3, DL, VT));
930fe6060f1SDimitry Andric           ReplaceNode(Node, SRLI);
931fe6060f1SDimitry Andric           return;
932fe6060f1SDimitry Andric         }
933fe6060f1SDimitry Andric       }
934fe6060f1SDimitry Andric     }
935fe6060f1SDimitry Andric 
936349cc55cSDimitry Andric     // Turn (and (shl x, c2), c1) -> (srli (slli c2+c3), c3) if c1 is a mask
937fe6060f1SDimitry Andric     // shifted by c2 bits with c3 leading zeros.
938fe6060f1SDimitry Andric     if (LeftShift && isShiftedMask_64(C1)) {
939fe6060f1SDimitry Andric       uint64_t C3 = XLen - (64 - countLeadingZeros(C1));
940fe6060f1SDimitry Andric 
941fe6060f1SDimitry Andric       if (C2 + C3 < XLen &&
942fe6060f1SDimitry Andric           C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + C3)) << C2)) {
943fe6060f1SDimitry Andric         // Use slli.uw when possible.
944fe6060f1SDimitry Andric         if ((XLen - (C2 + C3)) == 32 && Subtarget->hasStdExtZba()) {
945*81ad6265SDimitry Andric           SDNode *SLLI_UW = CurDAG->getMachineNode(
946*81ad6265SDimitry Andric               RISCV::SLLI_UW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
9471fd87a68SDimitry Andric           ReplaceNode(Node, SLLI_UW);
948fe6060f1SDimitry Andric           return;
949fe6060f1SDimitry Andric         }
950fe6060f1SDimitry Andric 
951fe6060f1SDimitry Andric         // (srli (slli c2+c3), c3)
952*81ad6265SDimitry Andric         if (OneUseOrZExtW && !IsCANDI) {
953fe6060f1SDimitry Andric           SDNode *SLLI = CurDAG->getMachineNode(
954*81ad6265SDimitry Andric               RISCV::SLLI, DL, VT, X,
955*81ad6265SDimitry Andric               CurDAG->getTargetConstant(C2 + C3, DL, VT));
956fe6060f1SDimitry Andric           SDNode *SRLI =
957*81ad6265SDimitry Andric               CurDAG->getMachineNode(RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
958*81ad6265SDimitry Andric                                      CurDAG->getTargetConstant(C3, DL, VT));
959fe6060f1SDimitry Andric           ReplaceNode(Node, SRLI);
960fe6060f1SDimitry Andric           return;
961fe6060f1SDimitry Andric         }
962fe6060f1SDimitry Andric       }
963fe6060f1SDimitry Andric     }
964fe6060f1SDimitry Andric 
965349cc55cSDimitry Andric     // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
966349cc55cSDimitry Andric     // shifted mask with c2 leading zeros and c3 trailing zeros.
967349cc55cSDimitry Andric     if (!LeftShift && isShiftedMask_64(C1)) {
968349cc55cSDimitry Andric       uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
969349cc55cSDimitry Andric       uint64_t C3 = countTrailingZeros(C1);
970*81ad6265SDimitry Andric       if (Leading == C2 && C2 + C3 < XLen && OneUseOrZExtW && !IsCANDI) {
971*81ad6265SDimitry Andric         unsigned SrliOpc = RISCV::SRLI;
972*81ad6265SDimitry Andric         // If the input is zexti32 we should use SRLIW.
973*81ad6265SDimitry Andric         if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
974*81ad6265SDimitry Andric             X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
975*81ad6265SDimitry Andric           SrliOpc = RISCV::SRLIW;
976*81ad6265SDimitry Andric           X = X.getOperand(0);
977*81ad6265SDimitry Andric         }
978349cc55cSDimitry Andric         SDNode *SRLI = CurDAG->getMachineNode(
979*81ad6265SDimitry Andric             SrliOpc, DL, VT, X, CurDAG->getTargetConstant(C2 + C3, DL, VT));
980349cc55cSDimitry Andric         SDNode *SLLI =
981*81ad6265SDimitry Andric             CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
982*81ad6265SDimitry Andric                                    CurDAG->getTargetConstant(C3, DL, VT));
983349cc55cSDimitry Andric         ReplaceNode(Node, SLLI);
984349cc55cSDimitry Andric         return;
985349cc55cSDimitry Andric       }
986349cc55cSDimitry Andric       // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
987349cc55cSDimitry Andric       if (Leading > 32 && (Leading - 32) == C2 && C2 + C3 < 32 &&
988*81ad6265SDimitry Andric           OneUseOrZExtW && !IsCANDI) {
989*81ad6265SDimitry Andric         SDNode *SRLIW =
990*81ad6265SDimitry Andric             CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, X,
991*81ad6265SDimitry Andric                                    CurDAG->getTargetConstant(C2 + C3, DL, VT));
992349cc55cSDimitry Andric         SDNode *SLLI =
993*81ad6265SDimitry Andric             CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
994*81ad6265SDimitry Andric                                    CurDAG->getTargetConstant(C3, DL, VT));
995349cc55cSDimitry Andric         ReplaceNode(Node, SLLI);
996349cc55cSDimitry Andric         return;
997349cc55cSDimitry Andric       }
998349cc55cSDimitry Andric     }
999349cc55cSDimitry Andric 
1000349cc55cSDimitry Andric     // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
1001349cc55cSDimitry Andric     // shifted mask with no leading zeros and c3 trailing zeros.
1002349cc55cSDimitry Andric     if (LeftShift && isShiftedMask_64(C1)) {
1003349cc55cSDimitry Andric       uint64_t Leading = XLen - (64 - countLeadingZeros(C1));
1004349cc55cSDimitry Andric       uint64_t C3 = countTrailingZeros(C1);
1005*81ad6265SDimitry Andric       if (Leading == 0 && C2 < C3 && OneUseOrZExtW && !IsCANDI) {
1006349cc55cSDimitry Andric         SDNode *SRLI = CurDAG->getMachineNode(
1007*81ad6265SDimitry Andric             RISCV::SRLI, DL, VT, X, CurDAG->getTargetConstant(C3 - C2, DL, VT));
1008349cc55cSDimitry Andric         SDNode *SLLI =
1009*81ad6265SDimitry Andric             CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
1010*81ad6265SDimitry Andric                                    CurDAG->getTargetConstant(C3, DL, VT));
1011349cc55cSDimitry Andric         ReplaceNode(Node, SLLI);
1012349cc55cSDimitry Andric         return;
1013349cc55cSDimitry Andric       }
1014349cc55cSDimitry Andric       // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
1015*81ad6265SDimitry Andric       if (C2 < C3 && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
1016*81ad6265SDimitry Andric         SDNode *SRLIW =
1017*81ad6265SDimitry Andric             CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, X,
1018*81ad6265SDimitry Andric                                    CurDAG->getTargetConstant(C3 - C2, DL, VT));
1019349cc55cSDimitry Andric         SDNode *SLLI =
1020*81ad6265SDimitry Andric             CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1021*81ad6265SDimitry Andric                                    CurDAG->getTargetConstant(C3, DL, VT));
1022349cc55cSDimitry Andric         ReplaceNode(Node, SLLI);
1023349cc55cSDimitry Andric         return;
1024349cc55cSDimitry Andric       }
1025349cc55cSDimitry Andric     }
1026349cc55cSDimitry Andric 
1027fe6060f1SDimitry Andric     break;
1028fe6060f1SDimitry Andric   }
  case ISD::MUL: {
    // Special case for calculating (mul (and X, C2), C1) where the full product
    // fits in XLen bits. We can shift X left by the number of leading zeros in
    // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
    // product has XLen trailing zeros, putting it in the output of MULHU. This
    // can avoid materializing a constant in a register for C2.

    // RHS should be a constant.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
      break;

    // LHS should be an AND with constant.
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
      break;

    uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();

    // Constant should be a mask (a run of ones starting at bit 0).
    if (!isMask_64(C2))
      break;

    // This should be the only use of the AND unless we will use
    // (SRLI (SLLI X, 32), 32). We don't use a shift pair for other AND
    // constants.
    if (!N0.hasOneUse() && C2 != UINT64_C(0xFFFFFFFF))
      break;

    // If this can be an ANDI, ZEXT.H or ZEXT.W we don't need to do this
    // optimization: those are single cheap instructions already.
    if (isInt<12>(C2) ||
        (C2 == UINT64_C(0xFFFF) &&
         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
        (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba()))
      break;

    // We need to shift left the AND input and C1 by a total of XLen bits.

    // How far left do we need to shift the AND input?
    // LeadingZeros is C2's leading-zero count measured within XLen bits.
    unsigned XLen = Subtarget->getXLen();
    unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));

    // The constant gets shifted by the remaining amount unless that would
    // shift bits out. The RHS below is C1's leading-zero count within XLen
    // bits; bail out if C1 has fewer leading zeros than the shift amount.
    uint64_t C1 = N1C->getZExtValue();
    unsigned ConstantShift = XLen - LeadingZeros;
    if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
      break;

    uint64_t ShiftedC1 = C1 << ConstantShift;
    // If this is RV32, we need to sign extend the constant so that
    // materialization (selectImm) sees a canonical 32-bit value.
    if (XLen == 32)
      ShiftedC1 = SignExtend64<32>(ShiftedC1);

    // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
    // The SLLI clears the bits masked by C2, so the AND itself is not needed.
    SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                               CurDAG->getTargetConstant(LeadingZeros, DL, VT));
    SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
                                           SDValue(SLLI, 0), SDValue(Imm, 0));
    ReplaceNode(Node, MULHU);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      // Select vmsge{u}.vx by expanding x >= y as !(x < y):
      //   vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      // (There is no vmsge{u}.vx encoding to select directly — TODO confirm
      // against the RVV spec / pseudo tables.)
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns, except for unsigned
      // compare against zero, which is always true and handled below.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      // Pick the pseudo opcodes matching the source operand's LMUL.
      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
    break;
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

      // If vmsgeu with 0 immediate, expand it to vmset (always true).
      if (IsCmpUnsignedZero) {
        ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
        return;
      }

      // Expand to
      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      // Masked form of the vmsge{u} expansion above. Operands:
      //   1 = maskedoff, 2 = vector src, 3 = scalar src, 4 = mask, 5 = vl.
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns, except for unsigned
      // compare against zero, which is always true and handled below.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
          VMOROpcode;
      // Compare pseudos are chosen from the source operand's LMUL.
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
    break;
        CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
        CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
        CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
        CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
        CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
        CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
        CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_OPCODES
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
    break;
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
        CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);

      // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
      if (IsCmpUnsignedZero) {
        // We don't need vmor if the MaskedOff and the Mask are the same
        // value.
        if (Mask == MaskedOff) {
          ReplaceUses(Node, Mask.getNode());
          return;
        }
        ReplaceNode(Node,
                    CurDAG->getMachineNode(VMOROpcode, DL, VT,
                                           {Mask, MaskedOff, VL, MaskSEW}));
        return;
      }

      // If the MaskedOff value and the Mask are the same value use
      // vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0 so the masked compare can use it.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      // The result is mask undisturbed.
      // We use the same instructions to emulate mask agnostic behavior, because
      // the agnostic result can be either undisturbed or all 1.
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
          0);
      // vmxor.mm vd, vd, v0 is used to update active value.
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    case Intrinsic::riscv_vsetvli_opt:
    case Intrinsic::riscv_vsetvlimax_opt:
      return selectVSETVLI(Node);
    }
    break;
  }
1278e8d8bef9SDimitry Andric   case ISD::INTRINSIC_W_CHAIN: {
1279e8d8bef9SDimitry Andric     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1280e8d8bef9SDimitry Andric     switch (IntNo) {
1281e8d8bef9SDimitry Andric       // By default we do not custom select any intrinsic.
1282e8d8bef9SDimitry Andric     default:
12830b57cec5SDimitry Andric       break;
1284fe6060f1SDimitry Andric     case Intrinsic::riscv_vsetvli:
128504eeddc0SDimitry Andric     case Intrinsic::riscv_vsetvlimax:
128604eeddc0SDimitry Andric       return selectVSETVLI(Node);
1287e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg2:
1288e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg3:
1289e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg4:
1290e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg5:
1291e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg6:
1292e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg7:
1293e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg8: {
1294fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1295e8d8bef9SDimitry Andric       return;
1296e8d8bef9SDimitry Andric     }
1297e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg2_mask:
1298e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg3_mask:
1299e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg4_mask:
1300e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg5_mask:
1301e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg6_mask:
1302e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg7_mask:
1303e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg8_mask: {
1304fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1305e8d8bef9SDimitry Andric       return;
1306e8d8bef9SDimitry Andric     }
1307e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg2:
1308e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg3:
1309e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg4:
1310e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg5:
1311e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg6:
1312e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg7:
1313e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg8: {
1314fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1315e8d8bef9SDimitry Andric       return;
1316e8d8bef9SDimitry Andric     }
1317e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg2_mask:
1318e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg3_mask:
1319e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg4_mask:
1320e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg5_mask:
1321e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg6_mask:
1322e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg7_mask:
1323e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg8_mask: {
1324fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1325e8d8bef9SDimitry Andric       return;
1326e8d8bef9SDimitry Andric     }
1327e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg2:
1328e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg3:
1329e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg4:
1330e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg5:
1331e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg6:
1332e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg7:
1333e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg8:
1334fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1335fe6060f1SDimitry Andric       return;
1336e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg2:
1337e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg3:
1338e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg4:
1339e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg5:
1340e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg6:
1341e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg7:
1342fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxseg8:
1343fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1344e8d8bef9SDimitry Andric       return;
1345e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg2_mask:
1346e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg3_mask:
1347e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg4_mask:
1348e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg5_mask:
1349e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg6_mask:
1350e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg7_mask:
1351e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg8_mask:
1352fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1353fe6060f1SDimitry Andric       return;
1354e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg2_mask:
1355e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg3_mask:
1356e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg4_mask:
1357e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg5_mask:
1358e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg6_mask:
1359e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg7_mask:
1360fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxseg8_mask:
1361fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1362fe6060f1SDimitry Andric       return;
1363fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg8ff:
1364fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg7ff:
1365fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg6ff:
1366fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg5ff:
1367fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg4ff:
1368fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg3ff:
1369fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg2ff: {
1370fe6060f1SDimitry Andric       selectVLSEGFF(Node, /*IsMasked*/ false);
1371fe6060f1SDimitry Andric       return;
1372fe6060f1SDimitry Andric     }
1373fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg8ff_mask:
1374fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg7ff_mask:
1375fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg6ff_mask:
1376fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg5ff_mask:
1377fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg4ff_mask:
1378fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg3ff_mask:
1379fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg2ff_mask: {
1380fe6060f1SDimitry Andric       selectVLSEGFF(Node, /*IsMasked*/ true);
1381fe6060f1SDimitry Andric       return;
1382fe6060f1SDimitry Andric     }
1383fe6060f1SDimitry Andric     case Intrinsic::riscv_vloxei:
1384fe6060f1SDimitry Andric     case Intrinsic::riscv_vloxei_mask:
1385fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxei:
1386fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxei_mask: {
1387fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1388fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vluxei_mask;
1389fe6060f1SDimitry Andric       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1390fe6060f1SDimitry Andric                        IntNo == Intrinsic::riscv_vloxei_mask;
1391fe6060f1SDimitry Andric 
1392fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
1393fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1394fe6060f1SDimitry Andric 
1395fe6060f1SDimitry Andric       unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
1397*81ad6265SDimitry Andric       bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
1398fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
139904eeddc0SDimitry Andric       if (IsTU)
1400fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
140104eeddc0SDimitry Andric       else
140204eeddc0SDimitry Andric         // Skip the undef passthru operand for nomask TA version pseudo
140304eeddc0SDimitry Andric         CurOp++;
1404fe6060f1SDimitry Andric 
1405fe6060f1SDimitry Andric       MVT IndexVT;
1406fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1407fe6060f1SDimitry Andric                                  /*IsStridedOrIndexed*/ true, Operands,
1408349cc55cSDimitry Andric                                  /*IsLoad=*/true, &IndexVT);
1409fe6060f1SDimitry Andric 
1410fe6060f1SDimitry Andric       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1411fe6060f1SDimitry Andric              "Element count mismatch");
1412fe6060f1SDimitry Andric 
1413fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1414fe6060f1SDimitry Andric       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1415fe6060f1SDimitry Andric       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
141604eeddc0SDimitry Andric       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
141704eeddc0SDimitry Andric         report_fatal_error("The V extension does not support EEW=64 for index "
141804eeddc0SDimitry Andric                            "values when XLEN=32");
141904eeddc0SDimitry Andric       }
1420fe6060f1SDimitry Andric       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
142104eeddc0SDimitry Andric           IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1422fe6060f1SDimitry Andric           static_cast<unsigned>(IndexLMUL));
1423fe6060f1SDimitry Andric       MachineSDNode *Load =
1424fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1425fe6060f1SDimitry Andric 
1426fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1427fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1428fe6060f1SDimitry Andric 
1429fe6060f1SDimitry Andric       ReplaceNode(Node, Load);
1430fe6060f1SDimitry Andric       return;
1431fe6060f1SDimitry Andric     }
1432349cc55cSDimitry Andric     case Intrinsic::riscv_vlm:
1433fe6060f1SDimitry Andric     case Intrinsic::riscv_vle:
1434fe6060f1SDimitry Andric     case Intrinsic::riscv_vle_mask:
1435fe6060f1SDimitry Andric     case Intrinsic::riscv_vlse:
1436fe6060f1SDimitry Andric     case Intrinsic::riscv_vlse_mask: {
1437fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1438fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vlse_mask;
1439fe6060f1SDimitry Andric       bool IsStrided =
1440fe6060f1SDimitry Andric           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1441fe6060f1SDimitry Andric 
1442fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
1443fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1444fe6060f1SDimitry Andric 
1445fe6060f1SDimitry Andric       unsigned CurOp = 2;
      // The riscv_vlm intrinsic is always tail agnostic and has no passthru
      // operand.
144704eeddc0SDimitry Andric       bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
      // Masked intrinsics only have TU version pseudo instructions.
1449*81ad6265SDimitry Andric       bool IsTU = HasPassthruOperand &&
1450*81ad6265SDimitry Andric                   (IsMasked || !Node->getOperand(CurOp).isUndef());
1451fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
145204eeddc0SDimitry Andric       if (IsTU)
1453fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
145404eeddc0SDimitry Andric       else if (HasPassthruOperand)
145504eeddc0SDimitry Andric         // Skip the undef passthru operand for nomask TA version pseudo
145604eeddc0SDimitry Andric         CurOp++;
1457fe6060f1SDimitry Andric 
1458fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1459349cc55cSDimitry Andric                                  Operands, /*IsLoad=*/true);
1460fe6060f1SDimitry Andric 
1461fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1462fe6060f1SDimitry Andric       const RISCV::VLEPseudo *P =
146304eeddc0SDimitry Andric           RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
1464fe6060f1SDimitry Andric                               static_cast<unsigned>(LMUL));
1465fe6060f1SDimitry Andric       MachineSDNode *Load =
1466fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1467fe6060f1SDimitry Andric 
1468fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1469fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1470fe6060f1SDimitry Andric 
1471fe6060f1SDimitry Andric       ReplaceNode(Node, Load);
1472fe6060f1SDimitry Andric       return;
1473fe6060f1SDimitry Andric     }
1474fe6060f1SDimitry Andric     case Intrinsic::riscv_vleff:
1475fe6060f1SDimitry Andric     case Intrinsic::riscv_vleff_mask: {
1476fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1477fe6060f1SDimitry Andric 
1478fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
1479fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1480fe6060f1SDimitry Andric 
1481fe6060f1SDimitry Andric       unsigned CurOp = 2;
148204eeddc0SDimitry Andric       // Masked intrinsic only have TU version pseduo instructions.
1483*81ad6265SDimitry Andric       bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
1484fe6060f1SDimitry Andric       SmallVector<SDValue, 7> Operands;
148504eeddc0SDimitry Andric       if (IsTU)
1486fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
148704eeddc0SDimitry Andric       else
148804eeddc0SDimitry Andric         // Skip the undef passthru operand for nomask TA version pseudo
148904eeddc0SDimitry Andric         CurOp++;
1490fe6060f1SDimitry Andric 
1491fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1492349cc55cSDimitry Andric                                  /*IsStridedOrIndexed*/ false, Operands,
1493349cc55cSDimitry Andric                                  /*IsLoad=*/true);
1494fe6060f1SDimitry Andric 
1495fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1496fe6060f1SDimitry Andric       const RISCV::VLEPseudo *P =
149704eeddc0SDimitry Andric           RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
149804eeddc0SDimitry Andric                               Log2SEW, static_cast<unsigned>(LMUL));
1499*81ad6265SDimitry Andric       MachineSDNode *Load = CurDAG->getMachineNode(
1500*81ad6265SDimitry Andric           P->Pseudo, DL, Node->getVTList(), Operands);
1501fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1502fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1503fe6060f1SDimitry Andric 
1504*81ad6265SDimitry Andric       ReplaceNode(Node, Load);
15050b57cec5SDimitry Andric       return;
15060b57cec5SDimitry Andric     }
15070b57cec5SDimitry Andric     }
15080b57cec5SDimitry Andric     break;
15090b57cec5SDimitry Andric   }
1510e8d8bef9SDimitry Andric   case ISD::INTRINSIC_VOID: {
1511e8d8bef9SDimitry Andric     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1512e8d8bef9SDimitry Andric     switch (IntNo) {
1513e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg2:
1514e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg3:
1515e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg4:
1516e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg5:
1517e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg6:
1518e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg7:
1519e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg8: {
1520fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
15210b57cec5SDimitry Andric       return;
15220b57cec5SDimitry Andric     }
1523e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg2_mask:
1524e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg3_mask:
1525e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg4_mask:
1526e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg5_mask:
1527e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg6_mask:
1528e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg7_mask:
1529e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg8_mask: {
1530fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1531e8d8bef9SDimitry Andric       return;
1532e8d8bef9SDimitry Andric     }
1533e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg2:
1534e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg3:
1535e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg4:
1536e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg5:
1537e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg6:
1538e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg7:
1539e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg8: {
1540fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1541e8d8bef9SDimitry Andric       return;
1542e8d8bef9SDimitry Andric     }
1543e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg2_mask:
1544e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg3_mask:
1545e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg4_mask:
1546e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg5_mask:
1547e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg6_mask:
1548e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg7_mask:
1549e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg8_mask: {
1550fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1551e8d8bef9SDimitry Andric       return;
1552e8d8bef9SDimitry Andric     }
1553e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg2:
1554e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg3:
1555e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg4:
1556e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg5:
1557e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg6:
1558e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg7:
1559e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg8:
1560fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1561fe6060f1SDimitry Andric       return;
1562e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg2:
1563e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg3:
1564e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg4:
1565e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg5:
1566e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg6:
1567e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg7:
1568fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxseg8:
1569fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1570e8d8bef9SDimitry Andric       return;
1571e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg2_mask:
1572e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg3_mask:
1573e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg4_mask:
1574e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg5_mask:
1575e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg6_mask:
1576e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg7_mask:
1577e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg8_mask:
1578fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1579fe6060f1SDimitry Andric       return;
1580e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg2_mask:
1581e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg3_mask:
1582e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg4_mask:
1583e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg5_mask:
1584e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg6_mask:
1585e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg7_mask:
1586fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxseg8_mask:
1587fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1588fe6060f1SDimitry Andric       return;
1589fe6060f1SDimitry Andric     case Intrinsic::riscv_vsoxei:
1590fe6060f1SDimitry Andric     case Intrinsic::riscv_vsoxei_mask:
1591fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxei:
1592fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxei_mask: {
1593fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1594fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vsuxei_mask;
1595fe6060f1SDimitry Andric       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1596fe6060f1SDimitry Andric                        IntNo == Intrinsic::riscv_vsoxei_mask;
1597fe6060f1SDimitry Andric 
1598fe6060f1SDimitry Andric       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1599fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1600fe6060f1SDimitry Andric 
1601fe6060f1SDimitry Andric       unsigned CurOp = 2;
1602fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
1603fe6060f1SDimitry Andric       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1604fe6060f1SDimitry Andric 
1605fe6060f1SDimitry Andric       MVT IndexVT;
1606fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1607fe6060f1SDimitry Andric                                  /*IsStridedOrIndexed*/ true, Operands,
1608349cc55cSDimitry Andric                                  /*IsLoad=*/false, &IndexVT);
1609fe6060f1SDimitry Andric 
1610fe6060f1SDimitry Andric       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1611fe6060f1SDimitry Andric              "Element count mismatch");
1612fe6060f1SDimitry Andric 
1613fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1614fe6060f1SDimitry Andric       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1615fe6060f1SDimitry Andric       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
161604eeddc0SDimitry Andric       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
161704eeddc0SDimitry Andric         report_fatal_error("The V extension does not support EEW=64 for index "
161804eeddc0SDimitry Andric                            "values when XLEN=32");
161904eeddc0SDimitry Andric       }
1620fe6060f1SDimitry Andric       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
162104eeddc0SDimitry Andric           IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
162204eeddc0SDimitry Andric           static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
1623fe6060f1SDimitry Andric       MachineSDNode *Store =
1624fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1625fe6060f1SDimitry Andric 
1626fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1627fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1628fe6060f1SDimitry Andric 
1629fe6060f1SDimitry Andric       ReplaceNode(Node, Store);
1630fe6060f1SDimitry Andric       return;
1631fe6060f1SDimitry Andric     }
1632349cc55cSDimitry Andric     case Intrinsic::riscv_vsm:
1633fe6060f1SDimitry Andric     case Intrinsic::riscv_vse:
1634fe6060f1SDimitry Andric     case Intrinsic::riscv_vse_mask:
1635fe6060f1SDimitry Andric     case Intrinsic::riscv_vsse:
1636fe6060f1SDimitry Andric     case Intrinsic::riscv_vsse_mask: {
1637fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1638fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vsse_mask;
1639fe6060f1SDimitry Andric       bool IsStrided =
1640fe6060f1SDimitry Andric           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1641fe6060f1SDimitry Andric 
1642fe6060f1SDimitry Andric       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1643fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1644fe6060f1SDimitry Andric 
1645fe6060f1SDimitry Andric       unsigned CurOp = 2;
1646fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
1647fe6060f1SDimitry Andric       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1648fe6060f1SDimitry Andric 
1649fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1650fe6060f1SDimitry Andric                                  Operands);
1651fe6060f1SDimitry Andric 
1652fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1653fe6060f1SDimitry Andric       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1654fe6060f1SDimitry Andric           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1655fe6060f1SDimitry Andric       MachineSDNode *Store =
1656fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1657fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1658fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1659fe6060f1SDimitry Andric 
1660fe6060f1SDimitry Andric       ReplaceNode(Node, Store);
1661e8d8bef9SDimitry Andric       return;
1662e8d8bef9SDimitry Andric     }
1663e8d8bef9SDimitry Andric     }
1664e8d8bef9SDimitry Andric     break;
1665e8d8bef9SDimitry Andric   }
1666fe6060f1SDimitry Andric   case ISD::BITCAST: {
1667fe6060f1SDimitry Andric     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1668fe6060f1SDimitry Andric     // Just drop bitcasts between vectors if both are fixed or both are
1669fe6060f1SDimitry Andric     // scalable.
1670fe6060f1SDimitry Andric     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1671fe6060f1SDimitry Andric         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1672fe6060f1SDimitry Andric       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1673fe6060f1SDimitry Andric       CurDAG->RemoveDeadNode(Node);
1674e8d8bef9SDimitry Andric       return;
1675e8d8bef9SDimitry Andric     }
1676fe6060f1SDimitry Andric     break;
1677fe6060f1SDimitry Andric   }
1678fe6060f1SDimitry Andric   case ISD::INSERT_SUBVECTOR: {
1679fe6060f1SDimitry Andric     SDValue V = Node->getOperand(0);
1680fe6060f1SDimitry Andric     SDValue SubV = Node->getOperand(1);
1681fe6060f1SDimitry Andric     SDLoc DL(SubV);
1682fe6060f1SDimitry Andric     auto Idx = Node->getConstantOperandVal(2);
1683fe6060f1SDimitry Andric     MVT SubVecVT = SubV.getSimpleValueType();
1684fe6060f1SDimitry Andric 
1685fe6060f1SDimitry Andric     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1686fe6060f1SDimitry Andric     MVT SubVecContainerVT = SubVecVT;
1687fe6060f1SDimitry Andric     // Establish the correct scalable-vector types for any fixed-length type.
1688fe6060f1SDimitry Andric     if (SubVecVT.isFixedLengthVector())
1689fe6060f1SDimitry Andric       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1690fe6060f1SDimitry Andric     if (VT.isFixedLengthVector())
1691fe6060f1SDimitry Andric       VT = TLI.getContainerForFixedLengthVector(VT);
1692fe6060f1SDimitry Andric 
1693fe6060f1SDimitry Andric     const auto *TRI = Subtarget->getRegisterInfo();
1694fe6060f1SDimitry Andric     unsigned SubRegIdx;
1695fe6060f1SDimitry Andric     std::tie(SubRegIdx, Idx) =
1696fe6060f1SDimitry Andric         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1697fe6060f1SDimitry Andric             VT, SubVecContainerVT, Idx, TRI);
1698fe6060f1SDimitry Andric 
1699fe6060f1SDimitry Andric     // If the Idx hasn't been completely eliminated then this is a subvector
1700fe6060f1SDimitry Andric     // insert which doesn't naturally align to a vector register. These must
1701fe6060f1SDimitry Andric     // be handled using instructions to manipulate the vector registers.
1702fe6060f1SDimitry Andric     if (Idx != 0)
1703fe6060f1SDimitry Andric       break;
1704fe6060f1SDimitry Andric 
1705fe6060f1SDimitry Andric     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1706fe6060f1SDimitry Andric     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1707fe6060f1SDimitry Andric                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1708fe6060f1SDimitry Andric                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1709fe6060f1SDimitry Andric     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1710fe6060f1SDimitry Andric     assert((!IsSubVecPartReg || V.isUndef()) &&
1711fe6060f1SDimitry Andric            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1712fe6060f1SDimitry Andric            "the subvector is smaller than a full-sized register");
1713fe6060f1SDimitry Andric 
1714fe6060f1SDimitry Andric     // If we haven't set a SubRegIdx, then we must be going between
1715fe6060f1SDimitry Andric     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1716fe6060f1SDimitry Andric     if (SubRegIdx == RISCV::NoSubRegister) {
1717fe6060f1SDimitry Andric       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1718fe6060f1SDimitry Andric       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1719fe6060f1SDimitry Andric                  InRegClassID &&
1720fe6060f1SDimitry Andric              "Unexpected subvector extraction");
1721fe6060f1SDimitry Andric       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1722fe6060f1SDimitry Andric       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1723fe6060f1SDimitry Andric                                                DL, VT, SubV, RC);
1724fe6060f1SDimitry Andric       ReplaceNode(Node, NewNode);
1725fe6060f1SDimitry Andric       return;
1726fe6060f1SDimitry Andric     }
1727fe6060f1SDimitry Andric 
1728fe6060f1SDimitry Andric     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1729fe6060f1SDimitry Andric     ReplaceNode(Node, Insert.getNode());
1730fe6060f1SDimitry Andric     return;
1731fe6060f1SDimitry Andric   }
1732fe6060f1SDimitry Andric   case ISD::EXTRACT_SUBVECTOR: {
1733fe6060f1SDimitry Andric     SDValue V = Node->getOperand(0);
1734fe6060f1SDimitry Andric     auto Idx = Node->getConstantOperandVal(1);
1735fe6060f1SDimitry Andric     MVT InVT = V.getSimpleValueType();
1736fe6060f1SDimitry Andric     SDLoc DL(V);
1737fe6060f1SDimitry Andric 
1738fe6060f1SDimitry Andric     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1739fe6060f1SDimitry Andric     MVT SubVecContainerVT = VT;
1740fe6060f1SDimitry Andric     // Establish the correct scalable-vector types for any fixed-length type.
1741fe6060f1SDimitry Andric     if (VT.isFixedLengthVector())
1742fe6060f1SDimitry Andric       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1743fe6060f1SDimitry Andric     if (InVT.isFixedLengthVector())
1744fe6060f1SDimitry Andric       InVT = TLI.getContainerForFixedLengthVector(InVT);
1745fe6060f1SDimitry Andric 
1746fe6060f1SDimitry Andric     const auto *TRI = Subtarget->getRegisterInfo();
1747fe6060f1SDimitry Andric     unsigned SubRegIdx;
1748fe6060f1SDimitry Andric     std::tie(SubRegIdx, Idx) =
1749fe6060f1SDimitry Andric         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1750fe6060f1SDimitry Andric             InVT, SubVecContainerVT, Idx, TRI);
1751fe6060f1SDimitry Andric 
1752fe6060f1SDimitry Andric     // If the Idx hasn't been completely eliminated then this is a subvector
1753fe6060f1SDimitry Andric     // extract which doesn't naturally align to a vector register. These must
1754fe6060f1SDimitry Andric     // be handled using instructions to manipulate the vector registers.
1755fe6060f1SDimitry Andric     if (Idx != 0)
1756fe6060f1SDimitry Andric       break;
1757fe6060f1SDimitry Andric 
1758fe6060f1SDimitry Andric     // If we haven't set a SubRegIdx, then we must be going between
1759fe6060f1SDimitry Andric     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1760fe6060f1SDimitry Andric     if (SubRegIdx == RISCV::NoSubRegister) {
1761fe6060f1SDimitry Andric       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1762fe6060f1SDimitry Andric       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1763fe6060f1SDimitry Andric                  InRegClassID &&
1764fe6060f1SDimitry Andric              "Unexpected subvector extraction");
1765fe6060f1SDimitry Andric       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1766fe6060f1SDimitry Andric       SDNode *NewNode =
1767fe6060f1SDimitry Andric           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1768fe6060f1SDimitry Andric       ReplaceNode(Node, NewNode);
1769fe6060f1SDimitry Andric       return;
1770fe6060f1SDimitry Andric     }
1771fe6060f1SDimitry Andric 
1772fe6060f1SDimitry Andric     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1773fe6060f1SDimitry Andric     ReplaceNode(Node, Extract.getNode());
1774fe6060f1SDimitry Andric     return;
1775fe6060f1SDimitry Andric   }
17760eae32dcSDimitry Andric   case ISD::SPLAT_VECTOR:
177704eeddc0SDimitry Andric   case RISCVISD::VMV_S_X_VL:
177804eeddc0SDimitry Andric   case RISCVISD::VFMV_S_F_VL:
1779fe6060f1SDimitry Andric   case RISCVISD::VMV_V_X_VL:
1780fe6060f1SDimitry Andric   case RISCVISD::VFMV_V_F_VL: {
1781fe6060f1SDimitry Andric     // Try to match splat of a scalar load to a strided load with stride of x0.
178204eeddc0SDimitry Andric     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
178304eeddc0SDimitry Andric                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1784*81ad6265SDimitry Andric     bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
1785*81ad6265SDimitry Andric     if (HasPassthruOperand && !Node->getOperand(0).isUndef())
178604eeddc0SDimitry Andric       break;
1787*81ad6265SDimitry Andric     SDValue Src = HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
1788fe6060f1SDimitry Andric     auto *Ld = dyn_cast<LoadSDNode>(Src);
1789fe6060f1SDimitry Andric     if (!Ld)
1790fe6060f1SDimitry Andric       break;
1791fe6060f1SDimitry Andric     EVT MemVT = Ld->getMemoryVT();
1792fe6060f1SDimitry Andric     // The memory VT should be the same size as the element type.
1793fe6060f1SDimitry Andric     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1794fe6060f1SDimitry Andric       break;
1795fe6060f1SDimitry Andric     if (!IsProfitableToFold(Src, Node, Node) ||
1796fe6060f1SDimitry Andric         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1797fe6060f1SDimitry Andric       break;
1798fe6060f1SDimitry Andric 
1799fe6060f1SDimitry Andric     SDValue VL;
18000eae32dcSDimitry Andric     if (Node->getOpcode() == ISD::SPLAT_VECTOR)
18010eae32dcSDimitry Andric       VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
180204eeddc0SDimitry Andric     else if (IsScalarMove) {
180304eeddc0SDimitry Andric       // We could deal with more VL if we update the VSETVLI insert pass to
180404eeddc0SDimitry Andric       // avoid introducing more VSETVLI.
180504eeddc0SDimitry Andric       if (!isOneConstant(Node->getOperand(2)))
180604eeddc0SDimitry Andric         break;
180704eeddc0SDimitry Andric       selectVLOp(Node->getOperand(2), VL);
180804eeddc0SDimitry Andric     } else
1809*81ad6265SDimitry Andric       selectVLOp(Node->getOperand(2), VL);
1810fe6060f1SDimitry Andric 
1811fe6060f1SDimitry Andric     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1812fe6060f1SDimitry Andric     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1813fe6060f1SDimitry Andric 
1814fe6060f1SDimitry Andric     SDValue Operands[] = {Ld->getBasePtr(),
1815fe6060f1SDimitry Andric                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1816fe6060f1SDimitry Andric                           Ld->getChain()};
1817fe6060f1SDimitry Andric 
1818fe6060f1SDimitry Andric     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1819fe6060f1SDimitry Andric     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
182004eeddc0SDimitry Andric         /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
182104eeddc0SDimitry Andric         Log2SEW, static_cast<unsigned>(LMUL));
1822fe6060f1SDimitry Andric     MachineSDNode *Load =
1823fe6060f1SDimitry Andric         CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1824fe6060f1SDimitry Andric 
1825*81ad6265SDimitry Andric     CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1826fe6060f1SDimitry Andric 
1827fe6060f1SDimitry Andric     ReplaceNode(Node, Load);
1828e8d8bef9SDimitry Andric     return;
1829e8d8bef9SDimitry Andric   }
1830e8d8bef9SDimitry Andric   }
18310b57cec5SDimitry Andric 
18320b57cec5SDimitry Andric   // Select the default instruction.
18330b57cec5SDimitry Andric   SelectCode(Node);
18340b57cec5SDimitry Andric }
18350b57cec5SDimitry Andric 
18360b57cec5SDimitry Andric bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
18370b57cec5SDimitry Andric     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
18380b57cec5SDimitry Andric   switch (ConstraintID) {
18390b57cec5SDimitry Andric   case InlineAsm::Constraint_m:
18400b57cec5SDimitry Andric     // We just support simple memory operands that have a single address
18410b57cec5SDimitry Andric     // operand and need no special handling.
18420b57cec5SDimitry Andric     OutOps.push_back(Op);
18430b57cec5SDimitry Andric     return false;
18440b57cec5SDimitry Andric   case InlineAsm::Constraint_A:
18450b57cec5SDimitry Andric     OutOps.push_back(Op);
18460b57cec5SDimitry Andric     return false;
18470b57cec5SDimitry Andric   default:
18480b57cec5SDimitry Andric     break;
18490b57cec5SDimitry Andric   }
18500b57cec5SDimitry Andric 
18510b57cec5SDimitry Andric   return true;
18520b57cec5SDimitry Andric }
18530b57cec5SDimitry Andric 
1854*81ad6265SDimitry Andric bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
1855*81ad6265SDimitry Andric                                              SDValue &Offset) {
1856fe6060f1SDimitry Andric   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
18570b57cec5SDimitry Andric     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1858*81ad6265SDimitry Andric     Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
18590b57cec5SDimitry Andric     return true;
18600b57cec5SDimitry Andric   }
1861*81ad6265SDimitry Andric 
1862*81ad6265SDimitry Andric   return false;
1863*81ad6265SDimitry Andric }
1864*81ad6265SDimitry Andric 
1865*81ad6265SDimitry Andric // Select a frame index and an optional immediate offset from an ADD or OR.
1866*81ad6265SDimitry Andric bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
1867*81ad6265SDimitry Andric                                               SDValue &Offset) {
1868*81ad6265SDimitry Andric   if (SelectAddrFrameIndex(Addr, Base, Offset))
1869*81ad6265SDimitry Andric     return true;
1870*81ad6265SDimitry Andric 
1871*81ad6265SDimitry Andric   if (!CurDAG->isBaseWithConstantOffset(Addr))
1872*81ad6265SDimitry Andric     return false;
1873*81ad6265SDimitry Andric 
1874*81ad6265SDimitry Andric   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
1875*81ad6265SDimitry Andric     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1876*81ad6265SDimitry Andric     if (isInt<12>(CVal)) {
1877*81ad6265SDimitry Andric       Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
1878*81ad6265SDimitry Andric                                          Subtarget->getXLenVT());
1879*81ad6265SDimitry Andric       Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
1880*81ad6265SDimitry Andric                                          Subtarget->getXLenVT());
1881*81ad6265SDimitry Andric       return true;
1882*81ad6265SDimitry Andric     }
1883*81ad6265SDimitry Andric   }
1884*81ad6265SDimitry Andric 
18850b57cec5SDimitry Andric   return false;
18860b57cec5SDimitry Andric }
18870b57cec5SDimitry Andric 
1888fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::SelectBaseAddr(SDValue Addr, SDValue &Base) {
1889fe6060f1SDimitry Andric   // If this is FrameIndex, select it directly. Otherwise just let it get
1890fe6060f1SDimitry Andric   // selected to a register independently.
1891fe6060f1SDimitry Andric   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr))
1892fe6060f1SDimitry Andric     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
1893fe6060f1SDimitry Andric   else
1894fe6060f1SDimitry Andric     Base = Addr;
1895fe6060f1SDimitry Andric   return true;
1896e8d8bef9SDimitry Andric }
1897e8d8bef9SDimitry Andric 
1898*81ad6265SDimitry Andric bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
1899*81ad6265SDimitry Andric                                          SDValue &Offset) {
1900*81ad6265SDimitry Andric   if (SelectAddrFrameIndex(Addr, Base, Offset))
1901*81ad6265SDimitry Andric     return true;
1902*81ad6265SDimitry Andric 
1903*81ad6265SDimitry Andric   SDLoc DL(Addr);
1904*81ad6265SDimitry Andric   MVT VT = Addr.getSimpleValueType();
1905*81ad6265SDimitry Andric 
1906*81ad6265SDimitry Andric   if (Addr.getOpcode() == RISCVISD::ADD_LO) {
1907*81ad6265SDimitry Andric     Base = Addr.getOperand(0);
1908*81ad6265SDimitry Andric     Offset = Addr.getOperand(1);
1909*81ad6265SDimitry Andric     return true;
1910*81ad6265SDimitry Andric   }
1911*81ad6265SDimitry Andric 
1912*81ad6265SDimitry Andric   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1913*81ad6265SDimitry Andric     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1914*81ad6265SDimitry Andric     if (isInt<12>(CVal)) {
1915*81ad6265SDimitry Andric       Base = Addr.getOperand(0);
1916*81ad6265SDimitry Andric       if (Base.getOpcode() == RISCVISD::ADD_LO) {
1917*81ad6265SDimitry Andric         SDValue LoOperand = Base.getOperand(1);
1918*81ad6265SDimitry Andric         if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
1919*81ad6265SDimitry Andric           // If the Lo in (ADD_LO hi, lo) is a global variable's address
1920*81ad6265SDimitry Andric           // (its low part, really), then we can rely on the alignment of that
1921*81ad6265SDimitry Andric           // variable to provide a margin of safety before low part can overflow
1922*81ad6265SDimitry Andric           // the 12 bits of the load/store offset. Check if CVal falls within
1923*81ad6265SDimitry Andric           // that margin; if so (low part + CVal) can't overflow.
1924*81ad6265SDimitry Andric           const DataLayout &DL = CurDAG->getDataLayout();
1925*81ad6265SDimitry Andric           Align Alignment = commonAlignment(
1926*81ad6265SDimitry Andric               GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
1927*81ad6265SDimitry Andric           if (CVal == 0 || Alignment > CVal) {
1928*81ad6265SDimitry Andric             int64_t CombinedOffset = CVal + GA->getOffset();
1929*81ad6265SDimitry Andric             Base = Base.getOperand(0);
1930*81ad6265SDimitry Andric             Offset = CurDAG->getTargetGlobalAddress(
1931*81ad6265SDimitry Andric                 GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
1932*81ad6265SDimitry Andric                 CombinedOffset, GA->getTargetFlags());
1933*81ad6265SDimitry Andric             return true;
1934*81ad6265SDimitry Andric           }
1935*81ad6265SDimitry Andric         }
1936*81ad6265SDimitry Andric       }
1937*81ad6265SDimitry Andric 
1938*81ad6265SDimitry Andric       if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
1939*81ad6265SDimitry Andric         Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
1940*81ad6265SDimitry Andric       Offset = CurDAG->getTargetConstant(CVal, DL, VT);
1941*81ad6265SDimitry Andric       return true;
1942*81ad6265SDimitry Andric     }
1943*81ad6265SDimitry Andric   }
1944*81ad6265SDimitry Andric 
1945*81ad6265SDimitry Andric   // Handle ADD with large immediates.
1946*81ad6265SDimitry Andric   if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
1947*81ad6265SDimitry Andric     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1948*81ad6265SDimitry Andric     assert(!isInt<12>(CVal) && "simm12 not already handled?");
1949*81ad6265SDimitry Andric 
1950*81ad6265SDimitry Andric     if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
1951*81ad6265SDimitry Andric       // We can use an ADDI for part of the offset and fold the rest into the
1952*81ad6265SDimitry Andric       // load/store. This mirrors the AddiPair PatFrag in RISCVInstrInfo.td.
1953*81ad6265SDimitry Andric       int64_t Adj = CVal < 0 ? -2048 : 2047;
1954*81ad6265SDimitry Andric       Base = SDValue(
1955*81ad6265SDimitry Andric           CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
1956*81ad6265SDimitry Andric                                  CurDAG->getTargetConstant(Adj, DL, VT)),
1957*81ad6265SDimitry Andric           0);
1958*81ad6265SDimitry Andric       Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
1959*81ad6265SDimitry Andric       return true;
1960*81ad6265SDimitry Andric     }
1961*81ad6265SDimitry Andric   }
1962*81ad6265SDimitry Andric 
1963*81ad6265SDimitry Andric   Base = Addr;
1964*81ad6265SDimitry Andric   Offset = CurDAG->getTargetConstant(0, DL, VT);
1965*81ad6265SDimitry Andric   return true;
1966*81ad6265SDimitry Andric }
1967*81ad6265SDimitry Andric 
1968fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
1969fe6060f1SDimitry Andric                                         SDValue &ShAmt) {
1970fe6060f1SDimitry Andric   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
1971fe6060f1SDimitry Andric   // amount. If there is an AND on the shift amount, we can bypass it if it
1972fe6060f1SDimitry Andric   // doesn't affect any of those bits.
1973fe6060f1SDimitry Andric   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
1974fe6060f1SDimitry Andric     const APInt &AndMask = N->getConstantOperandAPInt(1);
1975979e22ffSDimitry Andric 
1976fe6060f1SDimitry Andric     // Since the max shift amount is a power of 2 we can subtract 1 to make a
1977fe6060f1SDimitry Andric     // mask that covers the bits needed to represent all shift amounts.
1978fe6060f1SDimitry Andric     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
1979fe6060f1SDimitry Andric     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
1980e8d8bef9SDimitry Andric 
1981fe6060f1SDimitry Andric     if (ShMask.isSubsetOf(AndMask)) {
1982fe6060f1SDimitry Andric       ShAmt = N.getOperand(0);
1983fe6060f1SDimitry Andric       return true;
1984e8d8bef9SDimitry Andric     }
1985e8d8bef9SDimitry Andric 
1986fe6060f1SDimitry Andric     // SimplifyDemandedBits may have optimized the mask so try restoring any
1987fe6060f1SDimitry Andric     // bits that are known zero.
1988fe6060f1SDimitry Andric     KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
1989fe6060f1SDimitry Andric     if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
1990fe6060f1SDimitry Andric       ShAmt = N.getOperand(0);
1991fe6060f1SDimitry Andric       return true;
1992fe6060f1SDimitry Andric     }
1993*81ad6265SDimitry Andric   } else if (N.getOpcode() == ISD::SUB &&
1994*81ad6265SDimitry Andric              isa<ConstantSDNode>(N.getOperand(0))) {
1995*81ad6265SDimitry Andric     uint64_t Imm = N.getConstantOperandVal(0);
1996*81ad6265SDimitry Andric     // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
1997*81ad6265SDimitry Andric     // generate a NEG instead of a SUB of a constant.
1998*81ad6265SDimitry Andric     if (Imm != 0 && Imm % ShiftWidth == 0) {
1999*81ad6265SDimitry Andric       SDLoc DL(N);
2000*81ad6265SDimitry Andric       EVT VT = N.getValueType();
2001*81ad6265SDimitry Andric       SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
2002*81ad6265SDimitry Andric       unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
2003*81ad6265SDimitry Andric       MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
2004*81ad6265SDimitry Andric                                                   N.getOperand(1));
2005*81ad6265SDimitry Andric       ShAmt = SDValue(Neg, 0);
2006*81ad6265SDimitry Andric       return true;
2007*81ad6265SDimitry Andric     }
2008fe6060f1SDimitry Andric   }
2009fe6060f1SDimitry Andric 
2010fe6060f1SDimitry Andric   ShAmt = N;
2011fe6060f1SDimitry Andric   return true;
2012fe6060f1SDimitry Andric }
2013fe6060f1SDimitry Andric 
2014fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
2015fe6060f1SDimitry Andric   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
2016fe6060f1SDimitry Andric       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
2017fe6060f1SDimitry Andric     Val = N.getOperand(0);
2018fe6060f1SDimitry Andric     return true;
2019fe6060f1SDimitry Andric   }
2020fe6060f1SDimitry Andric   MVT VT = N.getSimpleValueType();
2021fe6060f1SDimitry Andric   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
2022fe6060f1SDimitry Andric     Val = N;
2023fe6060f1SDimitry Andric     return true;
2024fe6060f1SDimitry Andric   }
2025fe6060f1SDimitry Andric 
2026fe6060f1SDimitry Andric   return false;
2027fe6060f1SDimitry Andric }
2028fe6060f1SDimitry Andric 
2029fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
2030fe6060f1SDimitry Andric   if (N.getOpcode() == ISD::AND) {
2031fe6060f1SDimitry Andric     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2032fe6060f1SDimitry Andric     if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
2033fe6060f1SDimitry Andric       Val = N.getOperand(0);
2034fe6060f1SDimitry Andric       return true;
2035fe6060f1SDimitry Andric     }
2036fe6060f1SDimitry Andric   }
2037fe6060f1SDimitry Andric   MVT VT = N.getSimpleValueType();
2038fe6060f1SDimitry Andric   APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
2039fe6060f1SDimitry Andric   if (CurDAG->MaskedValueIsZero(N, Mask)) {
2040fe6060f1SDimitry Andric     Val = N;
2041fe6060f1SDimitry Andric     return true;
2042fe6060f1SDimitry Andric   }
2043fe6060f1SDimitry Andric 
2044fe6060f1SDimitry Andric   return false;
2045fe6060f1SDimitry Andric }
2046fe6060f1SDimitry Andric 
2047349cc55cSDimitry Andric // Return true if all users of this SDNode* only consume the lower \p Bits.
2048349cc55cSDimitry Andric // This can be used to form W instructions for add/sub/mul/shl even when the
2049349cc55cSDimitry Andric // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
2050349cc55cSDimitry Andric // SimplifyDemandedBits has made it so some users see a sext_inreg and some
2051349cc55cSDimitry Andric // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
2052349cc55cSDimitry Andric // the add/sub/mul/shl to become non-W instructions. By checking the users we
2053349cc55cSDimitry Andric // may be able to use a W instruction and CSE with the other instruction if
2054349cc55cSDimitry Andric // this has happened. We could try to detect that the CSE opportunity exists
2055349cc55cSDimitry Andric // before doing this, but that would be more complicated.
2056349cc55cSDimitry Andric // TODO: Does this need to look through AND/OR/XOR to their users to find more
2057349cc55cSDimitry Andric // opportunities.
2058349cc55cSDimitry Andric bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
2059349cc55cSDimitry Andric   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
2060349cc55cSDimitry Andric           Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
2061349cc55cSDimitry Andric           Node->getOpcode() == ISD::SRL ||
2062349cc55cSDimitry Andric           Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
2063*81ad6265SDimitry Andric           Node->getOpcode() == RISCVISD::GREV ||
2064*81ad6265SDimitry Andric           Node->getOpcode() == RISCVISD::GORC ||
2065349cc55cSDimitry Andric           isa<ConstantSDNode>(Node)) &&
2066349cc55cSDimitry Andric          "Unexpected opcode");
2067349cc55cSDimitry Andric 
2068349cc55cSDimitry Andric   for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
2069349cc55cSDimitry Andric     SDNode *User = *UI;
2070349cc55cSDimitry Andric     // Users of this node should have already been instruction selected
2071349cc55cSDimitry Andric     if (!User->isMachineOpcode())
2072349cc55cSDimitry Andric       return false;
2073349cc55cSDimitry Andric 
2074349cc55cSDimitry Andric     // TODO: Add more opcodes?
2075349cc55cSDimitry Andric     switch (User->getMachineOpcode()) {
2076349cc55cSDimitry Andric     default:
2077349cc55cSDimitry Andric       return false;
2078349cc55cSDimitry Andric     case RISCV::ADDW:
2079349cc55cSDimitry Andric     case RISCV::ADDIW:
2080349cc55cSDimitry Andric     case RISCV::SUBW:
2081349cc55cSDimitry Andric     case RISCV::MULW:
2082349cc55cSDimitry Andric     case RISCV::SLLW:
2083349cc55cSDimitry Andric     case RISCV::SLLIW:
2084349cc55cSDimitry Andric     case RISCV::SRAW:
2085349cc55cSDimitry Andric     case RISCV::SRAIW:
2086349cc55cSDimitry Andric     case RISCV::SRLW:
2087349cc55cSDimitry Andric     case RISCV::SRLIW:
2088349cc55cSDimitry Andric     case RISCV::DIVW:
2089349cc55cSDimitry Andric     case RISCV::DIVUW:
2090349cc55cSDimitry Andric     case RISCV::REMW:
2091349cc55cSDimitry Andric     case RISCV::REMUW:
2092349cc55cSDimitry Andric     case RISCV::ROLW:
2093349cc55cSDimitry Andric     case RISCV::RORW:
2094349cc55cSDimitry Andric     case RISCV::RORIW:
2095349cc55cSDimitry Andric     case RISCV::CLZW:
2096349cc55cSDimitry Andric     case RISCV::CTZW:
2097349cc55cSDimitry Andric     case RISCV::CPOPW:
20981fd87a68SDimitry Andric     case RISCV::SLLI_UW:
2099*81ad6265SDimitry Andric     case RISCV::FMV_W_X:
2100349cc55cSDimitry Andric     case RISCV::FCVT_H_W:
2101349cc55cSDimitry Andric     case RISCV::FCVT_H_WU:
2102349cc55cSDimitry Andric     case RISCV::FCVT_S_W:
2103349cc55cSDimitry Andric     case RISCV::FCVT_S_WU:
2104349cc55cSDimitry Andric     case RISCV::FCVT_D_W:
2105349cc55cSDimitry Andric     case RISCV::FCVT_D_WU:
2106349cc55cSDimitry Andric       if (Bits < 32)
2107349cc55cSDimitry Andric         return false;
2108349cc55cSDimitry Andric       break;
2109349cc55cSDimitry Andric     case RISCV::SLLI:
2110349cc55cSDimitry Andric       // SLLI only uses the lower (XLen - ShAmt) bits.
2111349cc55cSDimitry Andric       if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
2112349cc55cSDimitry Andric         return false;
2113349cc55cSDimitry Andric       break;
211404eeddc0SDimitry Andric     case RISCV::ANDI:
211504eeddc0SDimitry Andric       if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
211604eeddc0SDimitry Andric         return false;
211704eeddc0SDimitry Andric       break;
21181fd87a68SDimitry Andric     case RISCV::SEXT_B:
211904eeddc0SDimitry Andric       if (Bits < 8)
212004eeddc0SDimitry Andric         return false;
212104eeddc0SDimitry Andric       break;
21221fd87a68SDimitry Andric     case RISCV::SEXT_H:
2123*81ad6265SDimitry Andric     case RISCV::FMV_H_X:
21241fd87a68SDimitry Andric     case RISCV::ZEXT_H_RV32:
21251fd87a68SDimitry Andric     case RISCV::ZEXT_H_RV64:
212604eeddc0SDimitry Andric       if (Bits < 16)
212704eeddc0SDimitry Andric         return false;
212804eeddc0SDimitry Andric       break;
21291fd87a68SDimitry Andric     case RISCV::ADD_UW:
21301fd87a68SDimitry Andric     case RISCV::SH1ADD_UW:
21311fd87a68SDimitry Andric     case RISCV::SH2ADD_UW:
21321fd87a68SDimitry Andric     case RISCV::SH3ADD_UW:
2133349cc55cSDimitry Andric       // The first operand to add.uw/shXadd.uw is implicitly zero extended from
2134349cc55cSDimitry Andric       // 32 bits.
2135349cc55cSDimitry Andric       if (UI.getOperandNo() != 0 || Bits < 32)
2136349cc55cSDimitry Andric         return false;
2137349cc55cSDimitry Andric       break;
2138349cc55cSDimitry Andric     case RISCV::SB:
2139349cc55cSDimitry Andric       if (UI.getOperandNo() != 0 || Bits < 8)
2140349cc55cSDimitry Andric         return false;
2141349cc55cSDimitry Andric       break;
2142349cc55cSDimitry Andric     case RISCV::SH:
2143349cc55cSDimitry Andric       if (UI.getOperandNo() != 0 || Bits < 16)
2144349cc55cSDimitry Andric         return false;
2145349cc55cSDimitry Andric       break;
2146349cc55cSDimitry Andric     case RISCV::SW:
2147349cc55cSDimitry Andric       if (UI.getOperandNo() != 0 || Bits < 32)
2148349cc55cSDimitry Andric         return false;
2149349cc55cSDimitry Andric       break;
2150349cc55cSDimitry Andric     }
2151349cc55cSDimitry Andric   }
2152349cc55cSDimitry Andric 
2153349cc55cSDimitry Andric   return true;
2154349cc55cSDimitry Andric }
2155349cc55cSDimitry Andric 
2156fe6060f1SDimitry Andric // Select VL as a 5 bit immediate or a value that will become a register. This
2157fe6060f1SDimitry Andric // allows us to choose betwen VSETIVLI or VSETVLI later.
2158d409305fSDimitry Andric bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
2159d409305fSDimitry Andric   auto *C = dyn_cast<ConstantSDNode>(N);
2160*81ad6265SDimitry Andric   if (C && isUInt<5>(C->getZExtValue())) {
2161fe6060f1SDimitry Andric     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
2162fe6060f1SDimitry Andric                                    N->getValueType(0));
2163*81ad6265SDimitry Andric   } else if (C && C->isAllOnesValue()) {
2164*81ad6265SDimitry Andric     // Treat all ones as VLMax.
2165*81ad6265SDimitry Andric     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
2166*81ad6265SDimitry Andric                                    N->getValueType(0));
2167*81ad6265SDimitry Andric   } else if (isa<RegisterSDNode>(N) &&
2168*81ad6265SDimitry Andric              cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
2169*81ad6265SDimitry Andric     // All our VL operands use an operand that allows GPRNoX0 or an immediate
2170*81ad6265SDimitry Andric     // as the register class. Convert X0 to a special immediate to pass the
2171*81ad6265SDimitry Andric     // MachineVerifier. This is recognized specially by the vsetvli insertion
2172*81ad6265SDimitry Andric     // pass.
2173*81ad6265SDimitry Andric     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
2174*81ad6265SDimitry Andric                                    N->getValueType(0));
2175*81ad6265SDimitry Andric   } else {
2176d409305fSDimitry Andric     VL = N;
2177*81ad6265SDimitry Andric   }
2178d409305fSDimitry Andric 
2179d409305fSDimitry Andric   return true;
2180d409305fSDimitry Andric }
2181d409305fSDimitry Andric 
2182e8d8bef9SDimitry Andric bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
2183*81ad6265SDimitry Andric   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
2184e8d8bef9SDimitry Andric     return false;
2185*81ad6265SDimitry Andric   SplatVal = N.getOperand(1);
2186979e22ffSDimitry Andric   return true;
2187979e22ffSDimitry Andric }
2188e8d8bef9SDimitry Andric 
2189fe6060f1SDimitry Andric using ValidateFn = bool (*)(int64_t);
2190fe6060f1SDimitry Andric 
2191fe6060f1SDimitry Andric static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
2192fe6060f1SDimitry Andric                                    SelectionDAG &DAG,
2193fe6060f1SDimitry Andric                                    const RISCVSubtarget &Subtarget,
2194fe6060f1SDimitry Andric                                    ValidateFn ValidateImm) {
2195*81ad6265SDimitry Andric   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2196*81ad6265SDimitry Andric       !isa<ConstantSDNode>(N.getOperand(1)))
2197979e22ffSDimitry Andric     return false;
2198e8d8bef9SDimitry Andric 
2199*81ad6265SDimitry Andric   int64_t SplatImm =
2200*81ad6265SDimitry Andric       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2201e8d8bef9SDimitry Andric 
2202*81ad6265SDimitry Andric   // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
2203*81ad6265SDimitry Andric   // type is wider than the resulting vector element type: an implicit
2204*81ad6265SDimitry Andric   // truncation first takes place. Therefore, perform a manual
2205*81ad6265SDimitry Andric   // truncation/sign-extension in order to ignore any truncated bits and catch
2206*81ad6265SDimitry Andric   // any zero-extended immediate.
2207e8d8bef9SDimitry Andric   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
2208e8d8bef9SDimitry Andric   // sign-extending to (XLenVT -1).
2209fe6060f1SDimitry Andric   MVT XLenVT = Subtarget.getXLenVT();
2210*81ad6265SDimitry Andric   assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
2211e8d8bef9SDimitry Andric          "Unexpected splat operand type");
2212fe6060f1SDimitry Andric   MVT EltVT = N.getSimpleValueType().getVectorElementType();
2213fe6060f1SDimitry Andric   if (EltVT.bitsLT(XLenVT))
2214e8d8bef9SDimitry Andric     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
2215979e22ffSDimitry Andric 
2216fe6060f1SDimitry Andric   if (!ValidateImm(SplatImm))
2217e8d8bef9SDimitry Andric     return false;
2218979e22ffSDimitry Andric 
2219fe6060f1SDimitry Andric   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
2220979e22ffSDimitry Andric   return true;
2221979e22ffSDimitry Andric }
2222e8d8bef9SDimitry Andric 
2223fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
2224fe6060f1SDimitry Andric   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
2225fe6060f1SDimitry Andric                                 [](int64_t Imm) { return isInt<5>(Imm); });
2226fe6060f1SDimitry Andric }
2227fe6060f1SDimitry Andric 
2228fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
2229fe6060f1SDimitry Andric   return selectVSplatSimmHelper(
2230fe6060f1SDimitry Andric       N, SplatVal, *CurDAG, *Subtarget,
2231fe6060f1SDimitry Andric       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
2232fe6060f1SDimitry Andric }
2233fe6060f1SDimitry Andric 
2234fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
2235fe6060f1SDimitry Andric                                                       SDValue &SplatVal) {
2236fe6060f1SDimitry Andric   return selectVSplatSimmHelper(
2237fe6060f1SDimitry Andric       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
2238fe6060f1SDimitry Andric         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
2239fe6060f1SDimitry Andric       });
2240fe6060f1SDimitry Andric }
2241fe6060f1SDimitry Andric 
2242e8d8bef9SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
2243*81ad6265SDimitry Andric   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
2244*81ad6265SDimitry Andric       !isa<ConstantSDNode>(N.getOperand(1)))
2245979e22ffSDimitry Andric     return false;
2246979e22ffSDimitry Andric 
2247*81ad6265SDimitry Andric   int64_t SplatImm =
2248*81ad6265SDimitry Andric       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2249979e22ffSDimitry Andric 
2250e8d8bef9SDimitry Andric   if (!isUInt<5>(SplatImm))
2251e8d8bef9SDimitry Andric     return false;
2252e8d8bef9SDimitry Andric 
2253e8d8bef9SDimitry Andric   SplatVal =
2254e8d8bef9SDimitry Andric       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
2255e8d8bef9SDimitry Andric 
2256979e22ffSDimitry Andric   return true;
2257979e22ffSDimitry Andric }
2258979e22ffSDimitry Andric 
2259fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
2260fe6060f1SDimitry Andric                                        SDValue &Imm) {
2261fe6060f1SDimitry Andric   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
2262fe6060f1SDimitry Andric     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
2263fe6060f1SDimitry Andric 
2264fe6060f1SDimitry Andric     if (!isInt<5>(ImmVal))
2265fe6060f1SDimitry Andric       return false;
2266fe6060f1SDimitry Andric 
2267fe6060f1SDimitry Andric     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
2268fe6060f1SDimitry Andric     return true;
2269fe6060f1SDimitry Andric   }
2270fe6060f1SDimitry Andric 
2271fe6060f1SDimitry Andric   return false;
2272fe6060f1SDimitry Andric }
2273fe6060f1SDimitry Andric 
22740b57cec5SDimitry Andric // Merge an ADDI into the offset of a load/store instruction where possible.
22755ffd83dbSDimitry Andric // (load (addi base, off1), off2) -> (load base, off1+off2)
22765ffd83dbSDimitry Andric // (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
2277*81ad6265SDimitry Andric // (load (add base, (addi src, off1)), off2)
2278*81ad6265SDimitry Andric //    -> (load (add base, src), off1+off2)
2279*81ad6265SDimitry Andric // (store val, (add base, (addi src, off1)), off2)
2280*81ad6265SDimitry Andric //    -> (store val, (add base, src), off1+off2)
22815ffd83dbSDimitry Andric // This is possible when off1+off2 fits a 12-bit immediate.
2282349cc55cSDimitry Andric bool RISCVDAGToDAGISel::doPeepholeLoadStoreADDI(SDNode *N) {
2283*81ad6265SDimitry Andric   unsigned OffsetOpIdx, BaseOpIdx;
2284*81ad6265SDimitry Andric   if (!hasMemOffset(N, BaseOpIdx, OffsetOpIdx))
2285349cc55cSDimitry Andric     return false;
22860b57cec5SDimitry Andric 
22875ffd83dbSDimitry Andric   if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
2288349cc55cSDimitry Andric     return false;
22890b57cec5SDimitry Andric 
22900b57cec5SDimitry Andric   SDValue Base = N->getOperand(BaseOpIdx);
22910b57cec5SDimitry Andric 
2292*81ad6265SDimitry Andric   if (!Base.isMachineOpcode())
2293*81ad6265SDimitry Andric     return false;
2294*81ad6265SDimitry Andric 
2295*81ad6265SDimitry Andric   if (Base.getMachineOpcode() == RISCV::ADDI) {
22960b57cec5SDimitry Andric     // If the base is an ADDI, we can merge it in to the load/store.
2297*81ad6265SDimitry Andric   } else if (Base.getMachineOpcode() == RISCV::ADDIW &&
2298*81ad6265SDimitry Andric              isa<ConstantSDNode>(Base.getOperand(1)) &&
2299*81ad6265SDimitry Andric              Base.getOperand(0).isMachineOpcode() &&
2300*81ad6265SDimitry Andric              Base.getOperand(0).getMachineOpcode() == RISCV::LUI &&
2301*81ad6265SDimitry Andric              isa<ConstantSDNode>(Base.getOperand(0).getOperand(0))) {
2302*81ad6265SDimitry Andric     // ADDIW can be merged if it's part of LUI+ADDIW constant materialization
2303*81ad6265SDimitry Andric     // and LUI+ADDI would have produced the same result. This is true for all
2304*81ad6265SDimitry Andric     // simm32 values except 0x7ffff800-0x7fffffff.
2305*81ad6265SDimitry Andric     int64_t Offset =
2306*81ad6265SDimitry Andric       SignExtend64<32>(Base.getOperand(0).getConstantOperandVal(0) << 12);
2307*81ad6265SDimitry Andric     Offset += cast<ConstantSDNode>(Base.getOperand(1))->getSExtValue();
2308*81ad6265SDimitry Andric     if (!isInt<32>(Offset))
2309*81ad6265SDimitry Andric       return false;
2310*81ad6265SDimitry Andric   } else
2311349cc55cSDimitry Andric    return false;
23120b57cec5SDimitry Andric 
23130b57cec5SDimitry Andric   SDValue ImmOperand = Base.getOperand(1);
23145ffd83dbSDimitry Andric   uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);
23150b57cec5SDimitry Andric 
2316fe6060f1SDimitry Andric   if (auto *Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
23175ffd83dbSDimitry Andric     int64_t Offset1 = Const->getSExtValue();
23185ffd83dbSDimitry Andric     int64_t CombinedOffset = Offset1 + Offset2;
23195ffd83dbSDimitry Andric     if (!isInt<12>(CombinedOffset))
2320349cc55cSDimitry Andric       return false;
23215ffd83dbSDimitry Andric     ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
23225ffd83dbSDimitry Andric                                            ImmOperand.getValueType());
2323fe6060f1SDimitry Andric   } else if (auto *GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
23245ffd83dbSDimitry Andric     // If the off1 in (addi base, off1) is a global variable's address (its
23255ffd83dbSDimitry Andric     // low part, really), then we can rely on the alignment of that variable
23265ffd83dbSDimitry Andric     // to provide a margin of safety before off1 can overflow the 12 bits.
23275ffd83dbSDimitry Andric     // Check if off2 falls within that margin; if so off1+off2 can't overflow.
23285ffd83dbSDimitry Andric     const DataLayout &DL = CurDAG->getDataLayout();
2329*81ad6265SDimitry Andric     Align Alignment = commonAlignment(GA->getGlobal()->getPointerAlignment(DL),
2330*81ad6265SDimitry Andric                                       GA->getOffset());
23315ffd83dbSDimitry Andric     if (Offset2 != 0 && Alignment <= Offset2)
2332349cc55cSDimitry Andric       return false;
23335ffd83dbSDimitry Andric     int64_t Offset1 = GA->getOffset();
23345ffd83dbSDimitry Andric     int64_t CombinedOffset = Offset1 + Offset2;
23350b57cec5SDimitry Andric     ImmOperand = CurDAG->getTargetGlobalAddress(
23360b57cec5SDimitry Andric         GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
23375ffd83dbSDimitry Andric         CombinedOffset, GA->getTargetFlags());
2338fe6060f1SDimitry Andric   } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
23395ffd83dbSDimitry Andric     // Ditto.
2340*81ad6265SDimitry Andric     Align Alignment = commonAlignment(CP->getAlign(), CP->getOffset());
23415ffd83dbSDimitry Andric     if (Offset2 != 0 && Alignment <= Offset2)
2342349cc55cSDimitry Andric       return false;
23435ffd83dbSDimitry Andric     int64_t Offset1 = CP->getOffset();
23445ffd83dbSDimitry Andric     int64_t CombinedOffset = Offset1 + Offset2;
23455ffd83dbSDimitry Andric     ImmOperand = CurDAG->getTargetConstantPool(
23465ffd83dbSDimitry Andric         CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
23475ffd83dbSDimitry Andric         CombinedOffset, CP->getTargetFlags());
23480b57cec5SDimitry Andric   } else {
2349349cc55cSDimitry Andric     return false;
23500b57cec5SDimitry Andric   }
23510b57cec5SDimitry Andric 
23520b57cec5SDimitry Andric   LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase:    ");
23530b57cec5SDimitry Andric   LLVM_DEBUG(Base->dump(CurDAG));
23540b57cec5SDimitry Andric   LLVM_DEBUG(dbgs() << "\nN: ");
23550b57cec5SDimitry Andric   LLVM_DEBUG(N->dump(CurDAG));
23560b57cec5SDimitry Andric   LLVM_DEBUG(dbgs() << "\n");
23570b57cec5SDimitry Andric 
23580b57cec5SDimitry Andric   // Modify the offset operand of the load/store.
2359*81ad6265SDimitry Andric   if (BaseOpIdx == 0) { // Load
2360*81ad6265SDimitry Andric     N = CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
23610b57cec5SDimitry Andric                                    N->getOperand(2));
2362*81ad6265SDimitry Andric   } else { // Store
2363*81ad6265SDimitry Andric     N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
23640b57cec5SDimitry Andric                                    ImmOperand, N->getOperand(3));
2365*81ad6265SDimitry Andric   }
23660b57cec5SDimitry Andric 
2367349cc55cSDimitry Andric   return true;
23680b57cec5SDimitry Andric }
2369349cc55cSDimitry Andric 
2370349cc55cSDimitry Andric // Try to remove sext.w if the input is a W instruction or can be made into
2371349cc55cSDimitry Andric // a W instruction cheaply.
2372349cc55cSDimitry Andric bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
2373349cc55cSDimitry Andric   // Look for the sext.w pattern, addiw rd, rs1, 0.
2374349cc55cSDimitry Andric   if (N->getMachineOpcode() != RISCV::ADDIW ||
2375349cc55cSDimitry Andric       !isNullConstant(N->getOperand(1)))
2376349cc55cSDimitry Andric     return false;
2377349cc55cSDimitry Andric 
2378349cc55cSDimitry Andric   SDValue N0 = N->getOperand(0);
2379349cc55cSDimitry Andric   if (!N0.isMachineOpcode())
2380349cc55cSDimitry Andric     return false;
2381349cc55cSDimitry Andric 
2382349cc55cSDimitry Andric   switch (N0.getMachineOpcode()) {
2383349cc55cSDimitry Andric   default:
2384349cc55cSDimitry Andric     break;
2385349cc55cSDimitry Andric   case RISCV::ADD:
2386349cc55cSDimitry Andric   case RISCV::ADDI:
2387349cc55cSDimitry Andric   case RISCV::SUB:
2388349cc55cSDimitry Andric   case RISCV::MUL:
2389349cc55cSDimitry Andric   case RISCV::SLLI: {
2390349cc55cSDimitry Andric     // Convert sext.w+add/sub/mul to their W instructions. This will create
2391349cc55cSDimitry Andric     // a new independent instruction. This improves latency.
2392349cc55cSDimitry Andric     unsigned Opc;
2393349cc55cSDimitry Andric     switch (N0.getMachineOpcode()) {
2394349cc55cSDimitry Andric     default:
2395349cc55cSDimitry Andric       llvm_unreachable("Unexpected opcode!");
2396349cc55cSDimitry Andric     case RISCV::ADD:  Opc = RISCV::ADDW;  break;
2397349cc55cSDimitry Andric     case RISCV::ADDI: Opc = RISCV::ADDIW; break;
2398349cc55cSDimitry Andric     case RISCV::SUB:  Opc = RISCV::SUBW;  break;
2399349cc55cSDimitry Andric     case RISCV::MUL:  Opc = RISCV::MULW;  break;
2400349cc55cSDimitry Andric     case RISCV::SLLI: Opc = RISCV::SLLIW; break;
2401349cc55cSDimitry Andric     }
2402349cc55cSDimitry Andric 
2403349cc55cSDimitry Andric     SDValue N00 = N0.getOperand(0);
2404349cc55cSDimitry Andric     SDValue N01 = N0.getOperand(1);
2405349cc55cSDimitry Andric 
2406349cc55cSDimitry Andric     // Shift amount needs to be uimm5.
2407349cc55cSDimitry Andric     if (N0.getMachineOpcode() == RISCV::SLLI &&
2408349cc55cSDimitry Andric         !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2409349cc55cSDimitry Andric       break;
2410349cc55cSDimitry Andric 
2411349cc55cSDimitry Andric     SDNode *Result =
2412349cc55cSDimitry Andric         CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2413349cc55cSDimitry Andric                                N00, N01);
2414349cc55cSDimitry Andric     ReplaceUses(N, Result);
2415349cc55cSDimitry Andric     return true;
2416349cc55cSDimitry Andric   }
2417349cc55cSDimitry Andric   case RISCV::ADDW:
2418349cc55cSDimitry Andric   case RISCV::ADDIW:
2419349cc55cSDimitry Andric   case RISCV::SUBW:
2420349cc55cSDimitry Andric   case RISCV::MULW:
2421349cc55cSDimitry Andric   case RISCV::SLLIW:
2422*81ad6265SDimitry Andric   case RISCV::GREVIW:
2423*81ad6265SDimitry Andric   case RISCV::GORCIW:
2424349cc55cSDimitry Andric     // Result is already sign extended just remove the sext.w.
2425349cc55cSDimitry Andric     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2426349cc55cSDimitry Andric     ReplaceUses(N, N0.getNode());
2427349cc55cSDimitry Andric     return true;
2428349cc55cSDimitry Andric   }
2429349cc55cSDimitry Andric 
2430349cc55cSDimitry Andric   return false;
24310b57cec5SDimitry Andric }
24320b57cec5SDimitry Andric 
2433*81ad6265SDimitry Andric // Optimize masked RVV pseudo instructions with a known all-ones mask to their
2434*81ad6265SDimitry Andric // corresponding "unmasked" pseudo versions. The mask we're interested in will
2435*81ad6265SDimitry Andric // take the form of a V0 physical register operand, with a glued
2436*81ad6265SDimitry Andric // register-setting instruction.
2437*81ad6265SDimitry Andric bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
2438*81ad6265SDimitry Andric   const RISCV::RISCVMaskedPseudoInfo *I =
2439*81ad6265SDimitry Andric       RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
2440*81ad6265SDimitry Andric   if (!I)
2441*81ad6265SDimitry Andric     return false;
2442*81ad6265SDimitry Andric 
2443*81ad6265SDimitry Andric   unsigned MaskOpIdx = I->MaskOpIdx;
2444*81ad6265SDimitry Andric 
2445*81ad6265SDimitry Andric   // Check that we're using V0 as a mask register.
2446*81ad6265SDimitry Andric   if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
2447*81ad6265SDimitry Andric       cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
2448*81ad6265SDimitry Andric     return false;
2449*81ad6265SDimitry Andric 
2450*81ad6265SDimitry Andric   // The glued user defines V0.
2451*81ad6265SDimitry Andric   const auto *Glued = N->getGluedNode();
2452*81ad6265SDimitry Andric 
2453*81ad6265SDimitry Andric   if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
2454*81ad6265SDimitry Andric     return false;
2455*81ad6265SDimitry Andric 
2456*81ad6265SDimitry Andric   // Check that we're defining V0 as a mask register.
2457*81ad6265SDimitry Andric   if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
2458*81ad6265SDimitry Andric       cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
2459*81ad6265SDimitry Andric     return false;
2460*81ad6265SDimitry Andric 
2461*81ad6265SDimitry Andric   // Check the instruction defining V0; it needs to be a VMSET pseudo.
2462*81ad6265SDimitry Andric   SDValue MaskSetter = Glued->getOperand(2);
2463*81ad6265SDimitry Andric 
2464*81ad6265SDimitry Andric   const auto IsVMSet = [](unsigned Opc) {
2465*81ad6265SDimitry Andric     return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
2466*81ad6265SDimitry Andric            Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
2467*81ad6265SDimitry Andric            Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
2468*81ad6265SDimitry Andric            Opc == RISCV::PseudoVMSET_M_B8;
2469*81ad6265SDimitry Andric   };
2470*81ad6265SDimitry Andric 
2471*81ad6265SDimitry Andric   // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
2472*81ad6265SDimitry Andric   // undefined behaviour if it's the wrong bitwidth, so we could choose to
2473*81ad6265SDimitry Andric   // assume that it's all-ones? Same applies to its VL.
2474*81ad6265SDimitry Andric   if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
2475*81ad6265SDimitry Andric     return false;
2476*81ad6265SDimitry Andric 
2477*81ad6265SDimitry Andric   // Retrieve the tail policy operand index, if any.
2478*81ad6265SDimitry Andric   Optional<unsigned> TailPolicyOpIdx;
2479*81ad6265SDimitry Andric   const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
2480*81ad6265SDimitry Andric   const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());
2481*81ad6265SDimitry Andric 
2482*81ad6265SDimitry Andric   bool IsTA = true;
2483*81ad6265SDimitry Andric   if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
2484*81ad6265SDimitry Andric     // The last operand of the pseudo is the policy op, but we might have a
2485*81ad6265SDimitry Andric     // Glue operand last. We might also have a chain.
2486*81ad6265SDimitry Andric     TailPolicyOpIdx = N->getNumOperands() - 1;
2487*81ad6265SDimitry Andric     if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
2488*81ad6265SDimitry Andric       (*TailPolicyOpIdx)--;
2489*81ad6265SDimitry Andric     if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
2490*81ad6265SDimitry Andric       (*TailPolicyOpIdx)--;
2491*81ad6265SDimitry Andric 
2492*81ad6265SDimitry Andric     if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
2493*81ad6265SDimitry Andric           RISCVII::TAIL_AGNOSTIC)) {
2494*81ad6265SDimitry Andric       // Keep the true-masked instruction when there is no unmasked TU
2495*81ad6265SDimitry Andric       // instruction
2496*81ad6265SDimitry Andric       if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
2497*81ad6265SDimitry Andric         return false;
2498*81ad6265SDimitry Andric       // We can't use TA if the tie-operand is not IMPLICIT_DEF
2499*81ad6265SDimitry Andric       if (!N->getOperand(0).isUndef())
2500*81ad6265SDimitry Andric         IsTA = false;
2501*81ad6265SDimitry Andric     }
2502*81ad6265SDimitry Andric   }
2503*81ad6265SDimitry Andric 
2504*81ad6265SDimitry Andric   unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
2505*81ad6265SDimitry Andric 
2506*81ad6265SDimitry Andric   // Check that we're dropping the mask operand and any policy operand
2507*81ad6265SDimitry Andric   // when we transform to this unmasked pseudo. Additionally, if this insturtion
2508*81ad6265SDimitry Andric   // is tail agnostic, the unmasked instruction should not have a merge op.
2509*81ad6265SDimitry Andric   uint64_t TSFlags = TII.get(Opc).TSFlags;
2510*81ad6265SDimitry Andric   assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
2511*81ad6265SDimitry Andric          RISCVII::hasDummyMaskOp(TSFlags) &&
2512*81ad6265SDimitry Andric          !RISCVII::hasVecPolicyOp(TSFlags) &&
2513*81ad6265SDimitry Andric          "Unexpected pseudo to transform to");
2514*81ad6265SDimitry Andric   (void)TSFlags;
2515*81ad6265SDimitry Andric 
2516*81ad6265SDimitry Andric   SmallVector<SDValue, 8> Ops;
2517*81ad6265SDimitry Andric   // Skip the merge operand at index 0 if IsTA
2518*81ad6265SDimitry Andric   for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
2519*81ad6265SDimitry Andric     // Skip the mask, the policy, and the Glue.
2520*81ad6265SDimitry Andric     SDValue Op = N->getOperand(I);
2521*81ad6265SDimitry Andric     if (I == MaskOpIdx || I == TailPolicyOpIdx ||
2522*81ad6265SDimitry Andric         Op.getValueType() == MVT::Glue)
2523*81ad6265SDimitry Andric       continue;
2524*81ad6265SDimitry Andric     Ops.push_back(Op);
2525*81ad6265SDimitry Andric   }
2526*81ad6265SDimitry Andric 
2527*81ad6265SDimitry Andric   // Transitively apply any node glued to our new node.
2528*81ad6265SDimitry Andric   if (auto *TGlued = Glued->getGluedNode())
2529*81ad6265SDimitry Andric     Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
2530*81ad6265SDimitry Andric 
2531*81ad6265SDimitry Andric   SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
2532*81ad6265SDimitry Andric   ReplaceUses(N, Result);
2533*81ad6265SDimitry Andric 
2534*81ad6265SDimitry Andric   return true;
2535*81ad6265SDimitry Andric }
2536*81ad6265SDimitry Andric 
25370b57cec5SDimitry Andric // This pass converts a legalized DAG into a RISCV-specific DAG, ready
25380b57cec5SDimitry Andric // for instruction scheduling.
2539*81ad6265SDimitry Andric FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
2540*81ad6265SDimitry Andric                                        CodeGenOpt::Level OptLevel) {
2541*81ad6265SDimitry Andric   return new RISCVDAGToDAGISel(TM, OptLevel);
25420b57cec5SDimitry Andric }
2543