xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (revision bdd1243df58e60e85101c09001d9812a789b6bc4)
10b57cec5SDimitry Andric //===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric // This file defines an instruction selector for the RISCV target.
100b57cec5SDimitry Andric //
110b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
120b57cec5SDimitry Andric 
135ffd83dbSDimitry Andric #include "RISCVISelDAGToDAG.h"
140b57cec5SDimitry Andric #include "MCTargetDesc/RISCVMCTargetDesc.h"
15e8d8bef9SDimitry Andric #include "MCTargetDesc/RISCVMatInt.h"
16fe6060f1SDimitry Andric #include "RISCVISelLowering.h"
17fe6060f1SDimitry Andric #include "RISCVMachineFunctionInfo.h"
180b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFrameInfo.h"
19e8d8bef9SDimitry Andric #include "llvm/IR/IntrinsicsRISCV.h"
205ffd83dbSDimitry Andric #include "llvm/Support/Alignment.h"
210b57cec5SDimitry Andric #include "llvm/Support/Debug.h"
220b57cec5SDimitry Andric #include "llvm/Support/MathExtras.h"
230b57cec5SDimitry Andric #include "llvm/Support/raw_ostream.h"
24*bdd1243dSDimitry Andric #include <optional>
255ffd83dbSDimitry Andric 
260b57cec5SDimitry Andric using namespace llvm;
270b57cec5SDimitry Andric 
280b57cec5SDimitry Andric #define DEBUG_TYPE "riscv-isel"
29*bdd1243dSDimitry Andric #define PASS_NAME "RISCV DAG->DAG Pattern Instruction Selection"
300b57cec5SDimitry Andric 
31*bdd1243dSDimitry Andric namespace llvm::RISCV {
32fe6060f1SDimitry Andric #define GET_RISCVVSSEGTable_IMPL
33fe6060f1SDimitry Andric #define GET_RISCVVLSEGTable_IMPL
34fe6060f1SDimitry Andric #define GET_RISCVVLXSEGTable_IMPL
35fe6060f1SDimitry Andric #define GET_RISCVVSXSEGTable_IMPL
36fe6060f1SDimitry Andric #define GET_RISCVVLETable_IMPL
37fe6060f1SDimitry Andric #define GET_RISCVVSETable_IMPL
38fe6060f1SDimitry Andric #define GET_RISCVVLXTable_IMPL
39fe6060f1SDimitry Andric #define GET_RISCVVSXTable_IMPL
4081ad6265SDimitry Andric #define GET_RISCVMaskedPseudosTable_IMPL
41fe6060f1SDimitry Andric #include "RISCVGenSearchableTables.inc"
42*bdd1243dSDimitry Andric } // namespace llvm::RISCV
43*bdd1243dSDimitry Andric 
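// Return the index of the last operand of \p Node, skipping over any trailing
// glue operand and then any trailing chain operand.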
44*bdd1243dSDimitry Andric static unsigned getLastNonGlueOrChainOpIdx(const SDNode *Node) {
45*bdd1243dSDimitry Andric   assert(Node->getNumOperands() > 0 && "Node with no operands");
46*bdd1243dSDimitry Andric   unsigned LastOpIdx = Node->getNumOperands() - 1;
47*bdd1243dSDimitry Andric   if (Node->getOperand(LastOpIdx).getValueType() == MVT::Glue)
48*bdd1243dSDimitry Andric     --LastOpIdx;
49*bdd1243dSDimitry Andric   if (Node->getOperand(LastOpIdx).getValueType() == MVT::Other)
50*bdd1243dSDimitry Andric     --LastOpIdx;
51*bdd1243dSDimitry Andric   return LastOpIdx;
52*bdd1243dSDimitry Andric }
53*bdd1243dSDimitry Andric 
54*bdd1243dSDimitry Andric static unsigned getVecPolicyOpIdx(const SDNode *Node, const MCInstrDesc &MCID) {
55*bdd1243dSDimitry Andric   assert(RISCVII::hasVecPolicyOp(MCID.TSFlags));
56*bdd1243dSDimitry Andric   (void)MCID;
57*bdd1243dSDimitry Andric   return getLastNonGlueOrChainOpIdx(Node);
58*bdd1243dSDimitry Andric }
59fe6060f1SDimitry Andric 
60fe6060f1SDimitry Andric void RISCVDAGToDAGISel::PreprocessISelDAG() {
61753f127fSDimitry Andric   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
62fe6060f1SDimitry Andric 
63753f127fSDimitry Andric   bool MadeChange = false;
64753f127fSDimitry Andric   while (Position != CurDAG->allnodes_begin()) {
65753f127fSDimitry Andric     SDNode *N = &*--Position;
66753f127fSDimitry Andric     if (N->use_empty())
67753f127fSDimitry Andric       continue;
68753f127fSDimitry Andric 
69753f127fSDimitry Andric     SDValue Result;
70753f127fSDimitry Andric     switch (N->getOpcode()) {
71753f127fSDimitry Andric     case ISD::SPLAT_VECTOR: {
7281ad6265SDimitry Andric       // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
7381ad6265SDimitry Andric       // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
7481ad6265SDimitry Andric       MVT VT = N->getSimpleValueType(0);
7581ad6265SDimitry Andric       unsigned Opc =
7681ad6265SDimitry Andric           VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
7781ad6265SDimitry Andric       SDLoc DL(N);
7881ad6265SDimitry Andric       SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
79753f127fSDimitry Andric       Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
8081ad6265SDimitry Andric                                N->getOperand(0), VL);
81753f127fSDimitry Andric       break;
8281ad6265SDimitry Andric     }
83753f127fSDimitry Andric     case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
84fe6060f1SDimitry Andric       // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0 vector
85fe6060f1SDimitry Andric       // load. Done after lowering and combining so that we have a chance to
86fe6060f1SDimitry Andric       // optimize this to VMV_V_X_VL when the upper bits aren't needed.
8781ad6265SDimitry Andric       assert(N->getNumOperands() == 4 && "Unexpected number of operands");
88fe6060f1SDimitry Andric       MVT VT = N->getSimpleValueType(0);
8981ad6265SDimitry Andric       SDValue Passthru = N->getOperand(0);
9081ad6265SDimitry Andric       SDValue Lo = N->getOperand(1);
9181ad6265SDimitry Andric       SDValue Hi = N->getOperand(2);
9281ad6265SDimitry Andric       SDValue VL = N->getOperand(3);
93fe6060f1SDimitry Andric       assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
94fe6060f1SDimitry Andric              Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
95fe6060f1SDimitry Andric              "Unexpected VTs!");
96fe6060f1SDimitry Andric       MachineFunction &MF = CurDAG->getMachineFunction();
97753f127fSDimitry Andric       RISCVMachineFunctionInfo *FuncInfo =
98753f127fSDimitry Andric           MF.getInfo<RISCVMachineFunctionInfo>();
99fe6060f1SDimitry Andric       SDLoc DL(N);
100fe6060f1SDimitry Andric 
101fe6060f1SDimitry Andric       // We use the same frame index we use for moving two i32s into 64-bit FPR.
102fe6060f1SDimitry Andric       // This is an analogous operation.
103fe6060f1SDimitry Andric       int FI = FuncInfo->getMoveF64FrameIndex(MF);
104fe6060f1SDimitry Andric       MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
105fe6060f1SDimitry Andric       const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
106fe6060f1SDimitry Andric       SDValue StackSlot =
107fe6060f1SDimitry Andric           CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));
108fe6060f1SDimitry Andric 
109fe6060f1SDimitry Andric       SDValue Chain = CurDAG->getEntryNode();
110fe6060f1SDimitry Andric       Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
111fe6060f1SDimitry Andric 
112fe6060f1SDimitry Andric       SDValue OffsetSlot =
113fe6060f1SDimitry Andric           CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
114fe6060f1SDimitry Andric       Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
115fe6060f1SDimitry Andric                             Align(8));
116fe6060f1SDimitry Andric 
117fe6060f1SDimitry Andric       Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
118fe6060f1SDimitry Andric 
119fe6060f1SDimitry Andric       SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
120fe6060f1SDimitry Andric       SDValue IntID =
121fe6060f1SDimitry Andric           CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
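      // Reading the stride from x0 gives a stride of zero, so every element is
      // loaded from the same 8-byte stack slot, broadcasting the i64 value.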
12204eeddc0SDimitry Andric       SDValue Ops[] = {Chain,
12304eeddc0SDimitry Andric                        IntID,
12481ad6265SDimitry Andric                        Passthru,
12504eeddc0SDimitry Andric                        StackSlot,
12604eeddc0SDimitry Andric                        CurDAG->getRegister(RISCV::X0, MVT::i64),
12704eeddc0SDimitry Andric                        VL};
128fe6060f1SDimitry Andric 
129753f127fSDimitry Andric       Result = CurDAG->getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
130753f127fSDimitry Andric                                            MVT::i64, MPI, Align(8),
131fe6060f1SDimitry Andric                                            MachineMemOperand::MOLoad);
132753f127fSDimitry Andric       break;
133fe6060f1SDimitry Andric     }
134fe6060f1SDimitry Andric     }
135fe6060f1SDimitry Andric 
136753f127fSDimitry Andric     if (Result) {
137753f127fSDimitry Andric       LLVM_DEBUG(dbgs() << "RISCV DAG preprocessing replacing:\nOld:    ");
138753f127fSDimitry Andric       LLVM_DEBUG(N->dump(CurDAG));
139753f127fSDimitry Andric       LLVM_DEBUG(dbgs() << "\nNew: ");
140753f127fSDimitry Andric       LLVM_DEBUG(Result->dump(CurDAG));
141753f127fSDimitry Andric       LLVM_DEBUG(dbgs() << "\n");
142753f127fSDimitry Andric 
143753f127fSDimitry Andric       CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
144753f127fSDimitry Andric       MadeChange = true;
145753f127fSDimitry Andric     }
146753f127fSDimitry Andric   }
147753f127fSDimitry Andric 
148753f127fSDimitry Andric   if (MadeChange)
149753f127fSDimitry Andric     CurDAG->RemoveDeadNodes();
150753f127fSDimitry Andric }
151753f127fSDimitry Andric 
1520b57cec5SDimitry Andric void RISCVDAGToDAGISel::PostprocessISelDAG() {
15381ad6265SDimitry Andric   HandleSDNode Dummy(CurDAG->getRoot());
154349cc55cSDimitry Andric   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
155349cc55cSDimitry Andric 
156349cc55cSDimitry Andric   bool MadeChange = false;
157349cc55cSDimitry Andric   while (Position != CurDAG->allnodes_begin()) {
158349cc55cSDimitry Andric     SDNode *N = &*--Position;
159349cc55cSDimitry Andric     // Skip dead nodes and any non-machine opcodes.
160349cc55cSDimitry Andric     if (N->use_empty() || !N->isMachineOpcode())
161349cc55cSDimitry Andric       continue;
162349cc55cSDimitry Andric 
163349cc55cSDimitry Andric     MadeChange |= doPeepholeSExtW(N);
16481ad6265SDimitry Andric     MadeChange |= doPeepholeMaskedRVV(N);
165349cc55cSDimitry Andric   }
166349cc55cSDimitry Andric 
16781ad6265SDimitry Andric   CurDAG->setRoot(Dummy.getValue());
16881ad6265SDimitry Andric 
169*bdd1243dSDimitry Andric   MadeChange |= doPeepholeMergeVVMFold();
170*bdd1243dSDimitry Andric 
171349cc55cSDimitry Andric   if (MadeChange)
172349cc55cSDimitry Andric     CurDAG->RemoveDeadNodes();
1730b57cec5SDimitry Andric }
1740b57cec5SDimitry Andric 
17581ad6265SDimitry Andric static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
17681ad6265SDimitry Andric                             RISCVMatInt::InstSeq &Seq) {
1778bcb0991SDimitry Andric   SDNode *Result = nullptr;
17881ad6265SDimitry Andric   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
1790b57cec5SDimitry Andric   for (RISCVMatInt::Inst &Inst : Seq) {
180*bdd1243dSDimitry Andric     SDValue SDImm = CurDAG->getTargetConstant(Inst.getImm(), DL, VT);
18181ad6265SDimitry Andric     switch (Inst.getOpndKind()) {
18281ad6265SDimitry Andric     case RISCVMatInt::Imm:
183*bdd1243dSDimitry Andric       Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SDImm);
18481ad6265SDimitry Andric       break;
18581ad6265SDimitry Andric     case RISCVMatInt::RegX0:
186*bdd1243dSDimitry Andric       Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg,
18781ad6265SDimitry Andric                                       CurDAG->getRegister(RISCV::X0, VT));
18881ad6265SDimitry Andric       break;
18981ad6265SDimitry Andric     case RISCVMatInt::RegReg:
190*bdd1243dSDimitry Andric       Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SrcReg);
19181ad6265SDimitry Andric       break;
19281ad6265SDimitry Andric     case RISCVMatInt::RegImm:
193*bdd1243dSDimitry Andric       Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SDImm);
19481ad6265SDimitry Andric       break;
19581ad6265SDimitry Andric     }
1960b57cec5SDimitry Andric 
1970b57cec5SDimitry Andric     // Only the first instruction has X0 as its source.
1980b57cec5SDimitry Andric     SrcReg = SDValue(Result, 0);
1990b57cec5SDimitry Andric   }
2000b57cec5SDimitry Andric 
2010b57cec5SDimitry Andric   return Result;
2020b57cec5SDimitry Andric }
2030b57cec5SDimitry Andric 
20481ad6265SDimitry Andric static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
20581ad6265SDimitry Andric                          int64_t Imm, const RISCVSubtarget &Subtarget) {
20681ad6265SDimitry Andric   RISCVMatInt::InstSeq Seq =
20781ad6265SDimitry Andric       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
20881ad6265SDimitry Andric 
20981ad6265SDimitry Andric   return selectImmSeq(CurDAG, DL, VT, Seq);
21081ad6265SDimitry Andric }
21181ad6265SDimitry Andric 
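// Group NF vector registers into a single tuple value of the segment register
// class selected by NF and LMUL, using a REG_SEQUENCE node.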
21281ad6265SDimitry Andric static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
21381ad6265SDimitry Andric                            unsigned NF, RISCVII::VLMUL LMUL) {
21481ad6265SDimitry Andric   static const unsigned M1TupleRegClassIDs[] = {
21581ad6265SDimitry Andric       RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
21681ad6265SDimitry Andric       RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
21781ad6265SDimitry Andric       RISCV::VRN8M1RegClassID};
21881ad6265SDimitry Andric   static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
21981ad6265SDimitry Andric                                                 RISCV::VRN3M2RegClassID,
22081ad6265SDimitry Andric                                                 RISCV::VRN4M2RegClassID};
22181ad6265SDimitry Andric 
222e8d8bef9SDimitry Andric   assert(Regs.size() >= 2 && Regs.size() <= 8);
223e8d8bef9SDimitry Andric 
22481ad6265SDimitry Andric   unsigned RegClassID;
22581ad6265SDimitry Andric   unsigned SubReg0;
22681ad6265SDimitry Andric   switch (LMUL) {
22781ad6265SDimitry Andric   default:
22881ad6265SDimitry Andric     llvm_unreachable("Invalid LMUL.");
22981ad6265SDimitry Andric   case RISCVII::VLMUL::LMUL_F8:
23081ad6265SDimitry Andric   case RISCVII::VLMUL::LMUL_F4:
23181ad6265SDimitry Andric   case RISCVII::VLMUL::LMUL_F2:
23281ad6265SDimitry Andric   case RISCVII::VLMUL::LMUL_1:
23381ad6265SDimitry Andric     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
23481ad6265SDimitry Andric                   "Unexpected subreg numbering");
23581ad6265SDimitry Andric     SubReg0 = RISCV::sub_vrm1_0;
23681ad6265SDimitry Andric     RegClassID = M1TupleRegClassIDs[NF - 2];
23781ad6265SDimitry Andric     break;
23881ad6265SDimitry Andric   case RISCVII::VLMUL::LMUL_2:
23981ad6265SDimitry Andric     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
24081ad6265SDimitry Andric                   "Unexpected subreg numbering");
24181ad6265SDimitry Andric     SubReg0 = RISCV::sub_vrm2_0;
24281ad6265SDimitry Andric     RegClassID = M2TupleRegClassIDs[NF - 2];
24381ad6265SDimitry Andric     break;
24481ad6265SDimitry Andric   case RISCVII::VLMUL::LMUL_4:
24581ad6265SDimitry Andric     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
24681ad6265SDimitry Andric                   "Unexpected subreg numbering");
24781ad6265SDimitry Andric     SubReg0 = RISCV::sub_vrm4_0;
24881ad6265SDimitry Andric     RegClassID = RISCV::VRN2M4RegClassID;
24981ad6265SDimitry Andric     break;
25081ad6265SDimitry Andric   }
25181ad6265SDimitry Andric 
252e8d8bef9SDimitry Andric   SDLoc DL(Regs[0]);
253e8d8bef9SDimitry Andric   SmallVector<SDValue, 8> Ops;
254e8d8bef9SDimitry Andric 
255e8d8bef9SDimitry Andric   Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
256e8d8bef9SDimitry Andric 
257e8d8bef9SDimitry Andric   for (unsigned I = 0; I < Regs.size(); ++I) {
258e8d8bef9SDimitry Andric     Ops.push_back(Regs[I]);
259e8d8bef9SDimitry Andric     Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
260e8d8bef9SDimitry Andric   }
261e8d8bef9SDimitry Andric   SDNode *N =
262e8d8bef9SDimitry Andric       CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
263e8d8bef9SDimitry Andric   return SDValue(N, 0);
264e8d8bef9SDimitry Andric }
265e8d8bef9SDimitry Andric 
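// Append the operand sequence shared by RVV load/store pseudos: base pointer,
// optional stride/index, optional mask (copied into V0 and glued), VL, SEW,
// an optional policy operand for masked loads, and finally the chain/glue.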
266fe6060f1SDimitry Andric void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
267fe6060f1SDimitry Andric     SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
268fe6060f1SDimitry Andric     bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
269349cc55cSDimitry Andric     bool IsLoad, MVT *IndexVT) {
270fe6060f1SDimitry Andric   SDValue Chain = Node->getOperand(0);
271fe6060f1SDimitry Andric   SDValue Glue;
272fe6060f1SDimitry Andric 
273753f127fSDimitry Andric   Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.
274fe6060f1SDimitry Andric 
275fe6060f1SDimitry Andric   if (IsStridedOrIndexed) {
276fe6060f1SDimitry Andric     Operands.push_back(Node->getOperand(CurOp++)); // Index.
277fe6060f1SDimitry Andric     if (IndexVT)
278fe6060f1SDimitry Andric       *IndexVT = Operands.back()->getSimpleValueType(0);
279fe6060f1SDimitry Andric   }
280fe6060f1SDimitry Andric 
281fe6060f1SDimitry Andric   if (IsMasked) {
282fe6060f1SDimitry Andric     // Mask needs to be copied to V0.
283fe6060f1SDimitry Andric     SDValue Mask = Node->getOperand(CurOp++);
284fe6060f1SDimitry Andric     Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
285fe6060f1SDimitry Andric     Glue = Chain.getValue(1);
286fe6060f1SDimitry Andric     Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
287fe6060f1SDimitry Andric   }
288fe6060f1SDimitry Andric   SDValue VL;
289fe6060f1SDimitry Andric   selectVLOp(Node->getOperand(CurOp++), VL);
290fe6060f1SDimitry Andric   Operands.push_back(VL);
291fe6060f1SDimitry Andric 
292fe6060f1SDimitry Andric   MVT XLenVT = Subtarget->getXLenVT();
293fe6060f1SDimitry Andric   SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
294fe6060f1SDimitry Andric   Operands.push_back(SEWOp);
295fe6060f1SDimitry Andric 
296349cc55cSDimitry Andric   // Masked load has the tail policy argument.
297349cc55cSDimitry Andric   if (IsMasked && IsLoad) {
298349cc55cSDimitry Andric     // Policy must be a constant.
299349cc55cSDimitry Andric     uint64_t Policy = Node->getConstantOperandVal(CurOp++);
300349cc55cSDimitry Andric     SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
301349cc55cSDimitry Andric     Operands.push_back(PolicyOp);
302349cc55cSDimitry Andric   }
303349cc55cSDimitry Andric 
304fe6060f1SDimitry Andric   Operands.push_back(Chain); // Chain.
305fe6060f1SDimitry Andric   if (Glue)
306fe6060f1SDimitry Andric     Operands.push_back(Glue);
307fe6060f1SDimitry Andric }
308fe6060f1SDimitry Andric 
30981ad6265SDimitry Andric static bool isAllUndef(ArrayRef<SDValue> Values) {
31081ad6265SDimitry Andric   return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
31181ad6265SDimitry Andric }
31281ad6265SDimitry Andric 
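// Select a unit-stride or strided segment load. The pseudo returns a register
// tuple; each field is extracted with a subregister copy and replaces the
// corresponding result of the original node.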
313fe6060f1SDimitry Andric void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
314e8d8bef9SDimitry Andric                                     bool IsStrided) {
315e8d8bef9SDimitry Andric   SDLoc DL(Node);
316e8d8bef9SDimitry Andric   unsigned NF = Node->getNumValues() - 1;
317fe6060f1SDimitry Andric   MVT VT = Node->getSimpleValueType(0);
318fe6060f1SDimitry Andric   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
319fe6060f1SDimitry Andric   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
320e8d8bef9SDimitry Andric 
321fe6060f1SDimitry Andric   unsigned CurOp = 2;
322fe6060f1SDimitry Andric   SmallVector<SDValue, 8> Operands;
32381ad6265SDimitry Andric 
324fe6060f1SDimitry Andric   SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
325fe6060f1SDimitry Andric                                Node->op_begin() + CurOp + NF);
32681ad6265SDimitry Andric   bool IsTU = IsMasked || !isAllUndef(Regs);
32781ad6265SDimitry Andric   if (IsTU) {
32881ad6265SDimitry Andric     SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
32981ad6265SDimitry Andric     Operands.push_back(Merge);
330e8d8bef9SDimitry Andric   }
33181ad6265SDimitry Andric   CurOp += NF;
332fe6060f1SDimitry Andric 
333fe6060f1SDimitry Andric   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
334349cc55cSDimitry Andric                              Operands, /*IsLoad=*/true);
335fe6060f1SDimitry Andric 
336fe6060f1SDimitry Andric   const RISCV::VLSEGPseudo *P =
33781ad6265SDimitry Andric       RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
338fe6060f1SDimitry Andric                             static_cast<unsigned>(LMUL));
339fe6060f1SDimitry Andric   MachineSDNode *Load =
340e8d8bef9SDimitry Andric       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
341fe6060f1SDimitry Andric 
342fe6060f1SDimitry Andric   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
343fe6060f1SDimitry Andric     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
344fe6060f1SDimitry Andric 
345e8d8bef9SDimitry Andric   SDValue SuperReg = SDValue(Load, 0);
346fe6060f1SDimitry Andric   for (unsigned I = 0; I < NF; ++I) {
347fe6060f1SDimitry Andric     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
348e8d8bef9SDimitry Andric     ReplaceUses(SDValue(Node, I),
349fe6060f1SDimitry Andric                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
350fe6060f1SDimitry Andric   }
351e8d8bef9SDimitry Andric 
352e8d8bef9SDimitry Andric   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
353e8d8bef9SDimitry Andric   CurDAG->RemoveDeadNode(Node);
354e8d8bef9SDimitry Andric }
355e8d8bef9SDimitry Andric 
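// Select a fault-only-first segment load. Besides the tuple result, the
// pseudo also returns the updated VL in a GPR, which replaces result NF.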
356fe6060f1SDimitry Andric void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
357e8d8bef9SDimitry Andric   SDLoc DL(Node);
358fe6060f1SDimitry Andric   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
359fe6060f1SDimitry Andric   MVT VT = Node->getSimpleValueType(0);
360e8d8bef9SDimitry Andric   MVT XLenVT = Subtarget->getXLenVT();
361fe6060f1SDimitry Andric   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
362fe6060f1SDimitry Andric   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
363e8d8bef9SDimitry Andric 
364fe6060f1SDimitry Andric   unsigned CurOp = 2;
365e8d8bef9SDimitry Andric   SmallVector<SDValue, 7> Operands;
36681ad6265SDimitry Andric 
367fe6060f1SDimitry Andric   SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
368fe6060f1SDimitry Andric                                Node->op_begin() + CurOp + NF);
36981ad6265SDimitry Andric   bool IsTU = IsMasked || !isAllUndef(Regs);
37081ad6265SDimitry Andric   if (IsTU) {
371e8d8bef9SDimitry Andric     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
372fe6060f1SDimitry Andric     Operands.push_back(MaskedOff);
373fe6060f1SDimitry Andric   }
37481ad6265SDimitry Andric   CurOp += NF;
375e8d8bef9SDimitry Andric 
376fe6060f1SDimitry Andric   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
377349cc55cSDimitry Andric                              /*IsStridedOrIndexed*/ false, Operands,
378349cc55cSDimitry Andric                              /*IsLoad=*/true);
379fe6060f1SDimitry Andric 
380fe6060f1SDimitry Andric   const RISCV::VLSEGPseudo *P =
38181ad6265SDimitry Andric       RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
382fe6060f1SDimitry Andric                             Log2SEW, static_cast<unsigned>(LMUL));
383fe6060f1SDimitry Andric   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
38481ad6265SDimitry Andric                                                XLenVT, MVT::Other, Operands);
385fe6060f1SDimitry Andric 
386fe6060f1SDimitry Andric   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
387fe6060f1SDimitry Andric     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
388fe6060f1SDimitry Andric 
389e8d8bef9SDimitry Andric   SDValue SuperReg = SDValue(Load, 0);
390fe6060f1SDimitry Andric   for (unsigned I = 0; I < NF; ++I) {
391fe6060f1SDimitry Andric     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
392e8d8bef9SDimitry Andric     ReplaceUses(SDValue(Node, I),
393fe6060f1SDimitry Andric                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
394fe6060f1SDimitry Andric   }
395fe6060f1SDimitry Andric 
39681ad6265SDimitry Andric   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
39781ad6265SDimitry Andric   ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
398fe6060f1SDimitry Andric   CurDAG->RemoveDeadNode(Node);
399fe6060f1SDimitry Andric }
400fe6060f1SDimitry Andric 
401fe6060f1SDimitry Andric void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
402fe6060f1SDimitry Andric                                      bool IsOrdered) {
403fe6060f1SDimitry Andric   SDLoc DL(Node);
404fe6060f1SDimitry Andric   unsigned NF = Node->getNumValues() - 1;
405fe6060f1SDimitry Andric   MVT VT = Node->getSimpleValueType(0);
406fe6060f1SDimitry Andric   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
407fe6060f1SDimitry Andric   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
408fe6060f1SDimitry Andric 
409fe6060f1SDimitry Andric   unsigned CurOp = 2;
410fe6060f1SDimitry Andric   SmallVector<SDValue, 8> Operands;
41181ad6265SDimitry Andric 
412fe6060f1SDimitry Andric   SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
413fe6060f1SDimitry Andric                                Node->op_begin() + CurOp + NF);
41481ad6265SDimitry Andric   bool IsTU = IsMasked || !isAllUndef(Regs);
41581ad6265SDimitry Andric   if (IsTU) {
416fe6060f1SDimitry Andric     SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
417fe6060f1SDimitry Andric     Operands.push_back(MaskedOff);
418fe6060f1SDimitry Andric   }
41981ad6265SDimitry Andric   CurOp += NF;
420fe6060f1SDimitry Andric 
421fe6060f1SDimitry Andric   MVT IndexVT;
422fe6060f1SDimitry Andric   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
423349cc55cSDimitry Andric                              /*IsStridedOrIndexed*/ true, Operands,
424349cc55cSDimitry Andric                              /*IsLoad=*/true, &IndexVT);
425fe6060f1SDimitry Andric 
426fe6060f1SDimitry Andric   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
427fe6060f1SDimitry Andric          "Element count mismatch");
428fe6060f1SDimitry Andric 
429fe6060f1SDimitry Andric   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
430fe6060f1SDimitry Andric   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
43104eeddc0SDimitry Andric   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
43204eeddc0SDimitry Andric     report_fatal_error("The V extension does not support EEW=64 for index "
43304eeddc0SDimitry Andric                        "values when XLEN=32");
43404eeddc0SDimitry Andric   }
435fe6060f1SDimitry Andric   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
43681ad6265SDimitry Andric       NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
437fe6060f1SDimitry Andric       static_cast<unsigned>(IndexLMUL));
438fe6060f1SDimitry Andric   MachineSDNode *Load =
439fe6060f1SDimitry Andric       CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
440fe6060f1SDimitry Andric 
441fe6060f1SDimitry Andric   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
442fe6060f1SDimitry Andric     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
443fe6060f1SDimitry Andric 
444fe6060f1SDimitry Andric   SDValue SuperReg = SDValue(Load, 0);
445fe6060f1SDimitry Andric   for (unsigned I = 0; I < NF; ++I) {
446fe6060f1SDimitry Andric     unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
447fe6060f1SDimitry Andric     ReplaceUses(SDValue(Node, I),
448fe6060f1SDimitry Andric                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
449fe6060f1SDimitry Andric   }
450e8d8bef9SDimitry Andric 
451e8d8bef9SDimitry Andric   ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
452e8d8bef9SDimitry Andric   CurDAG->RemoveDeadNode(Node);
453e8d8bef9SDimitry Andric }
454e8d8bef9SDimitry Andric 
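// Select a unit-stride or strided segment store. The NF source vectors are
// first combined into a tuple, which is passed as a single store operand.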
455fe6060f1SDimitry Andric void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
456e8d8bef9SDimitry Andric                                     bool IsStrided) {
457e8d8bef9SDimitry Andric   SDLoc DL(Node);
458e8d8bef9SDimitry Andric   unsigned NF = Node->getNumOperands() - 4;
459e8d8bef9SDimitry Andric   if (IsStrided)
460e8d8bef9SDimitry Andric     NF--;
461fe6060f1SDimitry Andric   if (IsMasked)
462e8d8bef9SDimitry Andric     NF--;
463fe6060f1SDimitry Andric   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
464fe6060f1SDimitry Andric   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
465fe6060f1SDimitry Andric   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
466e8d8bef9SDimitry Andric   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
467e8d8bef9SDimitry Andric   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
468fe6060f1SDimitry Andric 
469fe6060f1SDimitry Andric   SmallVector<SDValue, 8> Operands;
470e8d8bef9SDimitry Andric   Operands.push_back(StoreVal);
471fe6060f1SDimitry Andric   unsigned CurOp = 2 + NF;
472fe6060f1SDimitry Andric 
473fe6060f1SDimitry Andric   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
474fe6060f1SDimitry Andric                              Operands);
475fe6060f1SDimitry Andric 
476fe6060f1SDimitry Andric   const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
477fe6060f1SDimitry Andric       NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
478fe6060f1SDimitry Andric   MachineSDNode *Store =
479e8d8bef9SDimitry Andric       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
480fe6060f1SDimitry Andric 
481fe6060f1SDimitry Andric   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
482fe6060f1SDimitry Andric     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
483fe6060f1SDimitry Andric 
484e8d8bef9SDimitry Andric   ReplaceNode(Node, Store);
485e8d8bef9SDimitry Andric }
486e8d8bef9SDimitry Andric 
487fe6060f1SDimitry Andric void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
488fe6060f1SDimitry Andric                                      bool IsOrdered) {
489e8d8bef9SDimitry Andric   SDLoc DL(Node);
490e8d8bef9SDimitry Andric   unsigned NF = Node->getNumOperands() - 5;
491fe6060f1SDimitry Andric   if (IsMasked)
492fe6060f1SDimitry Andric     --NF;
493fe6060f1SDimitry Andric   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
494fe6060f1SDimitry Andric   unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
495fe6060f1SDimitry Andric   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
496e8d8bef9SDimitry Andric   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
497e8d8bef9SDimitry Andric   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
498e8d8bef9SDimitry Andric 
499fe6060f1SDimitry Andric   SmallVector<SDValue, 8> Operands;
500fe6060f1SDimitry Andric   Operands.push_back(StoreVal);
501fe6060f1SDimitry Andric   unsigned CurOp = 2 + NF;
502fe6060f1SDimitry Andric 
503fe6060f1SDimitry Andric   MVT IndexVT;
504fe6060f1SDimitry Andric   addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
505349cc55cSDimitry Andric                              /*IsStridedOrIndexed*/ true, Operands,
506349cc55cSDimitry Andric                              /*IsLoad=*/false, &IndexVT);
507fe6060f1SDimitry Andric 
508fe6060f1SDimitry Andric   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
509fe6060f1SDimitry Andric          "Element count mismatch");
510fe6060f1SDimitry Andric 
511fe6060f1SDimitry Andric   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
512fe6060f1SDimitry Andric   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
51304eeddc0SDimitry Andric   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
51404eeddc0SDimitry Andric     report_fatal_error("The V extension does not support EEW=64 for index "
51504eeddc0SDimitry Andric                        "values when XLEN=32");
51604eeddc0SDimitry Andric   }
517fe6060f1SDimitry Andric   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
518fe6060f1SDimitry Andric       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
519e8d8bef9SDimitry Andric       static_cast<unsigned>(IndexLMUL));
520fe6060f1SDimitry Andric   MachineSDNode *Store =
521e8d8bef9SDimitry Andric       CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
522fe6060f1SDimitry Andric 
523fe6060f1SDimitry Andric   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
524fe6060f1SDimitry Andric     CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
525fe6060f1SDimitry Andric 
526e8d8bef9SDimitry Andric   ReplaceNode(Node, Store);
527e8d8bef9SDimitry Andric }
528e8d8bef9SDimitry Andric 
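// Lower the vsetvli/vsetvlimax intrinsics to PseudoVSETVLI, PseudoVSETVLIX0
// (when requesting VLMAX), or PseudoVSETIVLI (when AVL is a 5-bit constant).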
52904eeddc0SDimitry Andric void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
53004eeddc0SDimitry Andric   if (!Subtarget->hasVInstructions())
53104eeddc0SDimitry Andric     return;
53204eeddc0SDimitry Andric 
53304eeddc0SDimitry Andric   assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
53404eeddc0SDimitry Andric           Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
53504eeddc0SDimitry Andric          "Unexpected opcode");
53604eeddc0SDimitry Andric 
53704eeddc0SDimitry Andric   SDLoc DL(Node);
53804eeddc0SDimitry Andric   MVT XLenVT = Subtarget->getXLenVT();
53904eeddc0SDimitry Andric 
54004eeddc0SDimitry Andric   bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
54104eeddc0SDimitry Andric   unsigned IntNoOffset = HasChain ? 1 : 0;
54204eeddc0SDimitry Andric   unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
54304eeddc0SDimitry Andric 
54404eeddc0SDimitry Andric   assert((IntNo == Intrinsic::riscv_vsetvli ||
54504eeddc0SDimitry Andric           IntNo == Intrinsic::riscv_vsetvlimax ||
54604eeddc0SDimitry Andric           IntNo == Intrinsic::riscv_vsetvli_opt ||
54704eeddc0SDimitry Andric           IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
54804eeddc0SDimitry Andric          "Unexpected vsetvli intrinsic");
54904eeddc0SDimitry Andric 
55004eeddc0SDimitry Andric   bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
55104eeddc0SDimitry Andric                IntNo == Intrinsic::riscv_vsetvlimax_opt;
55204eeddc0SDimitry Andric   unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
55304eeddc0SDimitry Andric 
55404eeddc0SDimitry Andric   assert(Node->getNumOperands() == Offset + 2 &&
55504eeddc0SDimitry Andric          "Unexpected number of operands");
55604eeddc0SDimitry Andric 
55704eeddc0SDimitry Andric   unsigned SEW =
55804eeddc0SDimitry Andric       RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
55904eeddc0SDimitry Andric   RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
56004eeddc0SDimitry Andric       Node->getConstantOperandVal(Offset + 1) & 0x7);
56104eeddc0SDimitry Andric 
56204eeddc0SDimitry Andric   unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
56304eeddc0SDimitry Andric                                             /*MaskAgnostic*/ false);
56404eeddc0SDimitry Andric   SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
56504eeddc0SDimitry Andric 
56604eeddc0SDimitry Andric   SmallVector<EVT, 2> VTs = {XLenVT};
56704eeddc0SDimitry Andric   if (HasChain)
56804eeddc0SDimitry Andric     VTs.push_back(MVT::Other);
56904eeddc0SDimitry Andric 
57004eeddc0SDimitry Andric   SDValue VLOperand;
57104eeddc0SDimitry Andric   unsigned Opcode = RISCV::PseudoVSETVLI;
57204eeddc0SDimitry Andric   if (VLMax) {
57304eeddc0SDimitry Andric     VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
57404eeddc0SDimitry Andric     Opcode = RISCV::PseudoVSETVLIX0;
57504eeddc0SDimitry Andric   } else {
57604eeddc0SDimitry Andric     VLOperand = Node->getOperand(IntNoOffset + 1);
57704eeddc0SDimitry Andric 
57804eeddc0SDimitry Andric     if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
57904eeddc0SDimitry Andric       uint64_t AVL = C->getZExtValue();
58004eeddc0SDimitry Andric       if (isUInt<5>(AVL)) {
58104eeddc0SDimitry Andric         SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
58204eeddc0SDimitry Andric         SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
58304eeddc0SDimitry Andric         if (HasChain)
58404eeddc0SDimitry Andric           Ops.push_back(Node->getOperand(0));
58504eeddc0SDimitry Andric         ReplaceNode(
58604eeddc0SDimitry Andric             Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
58704eeddc0SDimitry Andric         return;
58804eeddc0SDimitry Andric       }
58904eeddc0SDimitry Andric     }
59004eeddc0SDimitry Andric   }
59104eeddc0SDimitry Andric 
59204eeddc0SDimitry Andric   SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
59304eeddc0SDimitry Andric   if (HasChain)
59404eeddc0SDimitry Andric     Ops.push_back(Node->getOperand(0));
59504eeddc0SDimitry Andric 
59604eeddc0SDimitry Andric   ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
59704eeddc0SDimitry Andric }
5980b57cec5SDimitry Andric 
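// Try to rewrite (and/or/xor (shl X, C1), C2) as a shift of the logic op so
// that the logic op can use a 12-bit immediate; see the comments below.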
599*bdd1243dSDimitry Andric bool RISCVDAGToDAGISel::tryShrinkShlLogicImm(SDNode *Node) {
600*bdd1243dSDimitry Andric   MVT VT = Node->getSimpleValueType(0);
601*bdd1243dSDimitry Andric   unsigned Opcode = Node->getOpcode();
602*bdd1243dSDimitry Andric   assert((Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR) &&
603*bdd1243dSDimitry Andric          "Unexpected opcode");
604*bdd1243dSDimitry Andric   SDLoc DL(Node);
605*bdd1243dSDimitry Andric 
606*bdd1243dSDimitry Andric   // For operations of the form (x << C1) op C2, check if we can use
607*bdd1243dSDimitry Andric   // ANDI/ORI/XORI by transforming it into (x op (C2>>C1)) << C1.
608*bdd1243dSDimitry Andric   SDValue N0 = Node->getOperand(0);
609*bdd1243dSDimitry Andric   SDValue N1 = Node->getOperand(1);
610*bdd1243dSDimitry Andric 
611*bdd1243dSDimitry Andric   ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
612*bdd1243dSDimitry Andric   if (!Cst)
613*bdd1243dSDimitry Andric     return false;
614*bdd1243dSDimitry Andric 
615*bdd1243dSDimitry Andric   int64_t Val = Cst->getSExtValue();
616*bdd1243dSDimitry Andric 
617*bdd1243dSDimitry Andric   // Check if immediate can already use ANDI/ORI/XORI.
618*bdd1243dSDimitry Andric   if (isInt<12>(Val))
619*bdd1243dSDimitry Andric     return false;
620*bdd1243dSDimitry Andric 
621*bdd1243dSDimitry Andric   SDValue Shift = N0;
622*bdd1243dSDimitry Andric 
623*bdd1243dSDimitry Andric   // If Val is simm32 and we have a sext_inreg from i32, then the binop
624*bdd1243dSDimitry Andric   // produces at least 33 sign bits. We can peek through the sext_inreg and use
625*bdd1243dSDimitry Andric   // a SLLIW at the end.
626*bdd1243dSDimitry Andric   bool SignExt = false;
627*bdd1243dSDimitry Andric   if (isInt<32>(Val) && N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
628*bdd1243dSDimitry Andric       N0.hasOneUse() && cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32) {
629*bdd1243dSDimitry Andric     SignExt = true;
630*bdd1243dSDimitry Andric     Shift = N0.getOperand(0);
631*bdd1243dSDimitry Andric   }
632*bdd1243dSDimitry Andric 
633*bdd1243dSDimitry Andric   if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
634*bdd1243dSDimitry Andric     return false;
635*bdd1243dSDimitry Andric 
636*bdd1243dSDimitry Andric   ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
637*bdd1243dSDimitry Andric   if (!ShlCst)
638*bdd1243dSDimitry Andric     return false;
639*bdd1243dSDimitry Andric 
640*bdd1243dSDimitry Andric   uint64_t ShAmt = ShlCst->getZExtValue();
641*bdd1243dSDimitry Andric 
642*bdd1243dSDimitry Andric   // Make sure that we don't change the operation by removing bits.
643*bdd1243dSDimitry Andric   // This only matters for OR and XOR; AND is unaffected.
644*bdd1243dSDimitry Andric   uint64_t RemovedBitsMask = maskTrailingOnes<uint64_t>(ShAmt);
645*bdd1243dSDimitry Andric   if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
646*bdd1243dSDimitry Andric     return false;
647*bdd1243dSDimitry Andric 
648*bdd1243dSDimitry Andric   int64_t ShiftedVal = Val >> ShAmt;
649*bdd1243dSDimitry Andric   if (!isInt<12>(ShiftedVal))
650*bdd1243dSDimitry Andric     return false;
651*bdd1243dSDimitry Andric 
652*bdd1243dSDimitry Andric   // If we peeked through a sext_inreg, make sure the shift is valid for SLLIW.
653*bdd1243dSDimitry Andric   if (SignExt && ShAmt >= 32)
654*bdd1243dSDimitry Andric     return false;
655*bdd1243dSDimitry Andric 
656*bdd1243dSDimitry Andric   // Ok, we can reorder to get a smaller immediate.
657*bdd1243dSDimitry Andric   unsigned BinOpc;
658*bdd1243dSDimitry Andric   switch (Opcode) {
659*bdd1243dSDimitry Andric   default: llvm_unreachable("Unexpected opcode");
660*bdd1243dSDimitry Andric   case ISD::AND: BinOpc = RISCV::ANDI; break;
661*bdd1243dSDimitry Andric   case ISD::OR:  BinOpc = RISCV::ORI;  break;
662*bdd1243dSDimitry Andric   case ISD::XOR: BinOpc = RISCV::XORI; break;
663*bdd1243dSDimitry Andric   }
664*bdd1243dSDimitry Andric 
665*bdd1243dSDimitry Andric   unsigned ShOpc = SignExt ? RISCV::SLLIW : RISCV::SLLI;
666*bdd1243dSDimitry Andric 
667*bdd1243dSDimitry Andric   SDNode *BinOp =
668*bdd1243dSDimitry Andric       CurDAG->getMachineNode(BinOpc, DL, VT, Shift.getOperand(0),
669*bdd1243dSDimitry Andric                              CurDAG->getTargetConstant(ShiftedVal, DL, VT));
670*bdd1243dSDimitry Andric   SDNode *SLLI =
671*bdd1243dSDimitry Andric       CurDAG->getMachineNode(ShOpc, DL, VT, SDValue(BinOp, 0),
672*bdd1243dSDimitry Andric                              CurDAG->getTargetConstant(ShAmt, DL, VT));
673*bdd1243dSDimitry Andric   ReplaceNode(Node, SLLI);
674*bdd1243dSDimitry Andric   return true;
675*bdd1243dSDimitry Andric }
676*bdd1243dSDimitry Andric 
6770b57cec5SDimitry Andric void RISCVDAGToDAGISel::Select(SDNode *Node) {
6780b57cec5SDimitry Andric   // If we have a custom node, we have already selected.
6790b57cec5SDimitry Andric   if (Node->isMachineOpcode()) {
6800b57cec5SDimitry Andric     LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
6810b57cec5SDimitry Andric     Node->setNodeId(-1);
6820b57cec5SDimitry Andric     return;
6830b57cec5SDimitry Andric   }
6840b57cec5SDimitry Andric 
6850b57cec5SDimitry Andric   // Instruction Selection not handled by the auto-generated tablegen selection
6860b57cec5SDimitry Andric   // should be handled here.
6870b57cec5SDimitry Andric   unsigned Opcode = Node->getOpcode();
6880b57cec5SDimitry Andric   MVT XLenVT = Subtarget->getXLenVT();
6890b57cec5SDimitry Andric   SDLoc DL(Node);
690fe6060f1SDimitry Andric   MVT VT = Node->getSimpleValueType(0);
6910b57cec5SDimitry Andric 
6920b57cec5SDimitry Andric   switch (Opcode) {
6930b57cec5SDimitry Andric   case ISD::Constant: {
694fe6060f1SDimitry Andric     auto *ConstNode = cast<ConstantSDNode>(Node);
695349cc55cSDimitry Andric     if (VT == XLenVT && ConstNode->isZero()) {
696e8d8bef9SDimitry Andric       SDValue New =
697e8d8bef9SDimitry Andric           CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
6980b57cec5SDimitry Andric       ReplaceNode(Node, New.getNode());
6990b57cec5SDimitry Andric       return;
7000b57cec5SDimitry Andric     }
701349cc55cSDimitry Andric     int64_t Imm = ConstNode->getSExtValue();
702349cc55cSDimitry Andric     // If the upper XLen-16 bits are not used, try to convert this to a simm12
703349cc55cSDimitry Andric     // by sign extending bit 15.
70481ad6265SDimitry Andric     if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
705349cc55cSDimitry Andric         hasAllHUsers(Node))
70681ad6265SDimitry Andric       Imm = SignExtend64<16>(Imm);
707349cc55cSDimitry Andric     // If the upper 32 bits are not used, try to convert this into a simm32 by
708349cc55cSDimitry Andric     // sign extending bit 31.
709349cc55cSDimitry Andric     if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
71081ad6265SDimitry Andric       Imm = SignExtend64<32>(Imm);
711349cc55cSDimitry Andric 
71204eeddc0SDimitry Andric     ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
7130b57cec5SDimitry Andric     return;
7140b57cec5SDimitry Andric   }
71581ad6265SDimitry Andric   case ISD::SHL: {
716fe6060f1SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
71704eeddc0SDimitry Andric     if (!N1C)
71804eeddc0SDimitry Andric       break;
719fe6060f1SDimitry Andric     SDValue N0 = Node->getOperand(0);
72004eeddc0SDimitry Andric     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
72104eeddc0SDimitry Andric         !isa<ConstantSDNode>(N0.getOperand(1)))
72204eeddc0SDimitry Andric       break;
72304eeddc0SDimitry Andric     unsigned ShAmt = N1C->getZExtValue();
724fe6060f1SDimitry Andric     uint64_t Mask = N0.getConstantOperandVal(1);
72581ad6265SDimitry Andric 
72681ad6265SDimitry Andric     // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
72781ad6265SDimitry Andric     // 32 leading zeros and C3 trailing zeros.
72881ad6265SDimitry Andric     if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
72981ad6265SDimitry Andric       unsigned XLen = Subtarget->getXLen();
730*bdd1243dSDimitry Andric       unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
73181ad6265SDimitry Andric       unsigned TrailingZeros = countTrailingZeros(Mask);
73281ad6265SDimitry Andric       if (TrailingZeros > 0 && LeadingZeros == 32) {
73381ad6265SDimitry Andric         SDNode *SRLIW = CurDAG->getMachineNode(
73481ad6265SDimitry Andric             RISCV::SRLIW, DL, VT, N0->getOperand(0),
73581ad6265SDimitry Andric             CurDAG->getTargetConstant(TrailingZeros, DL, VT));
73681ad6265SDimitry Andric         SDNode *SLLI = CurDAG->getMachineNode(
73781ad6265SDimitry Andric             RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
73881ad6265SDimitry Andric             CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
73981ad6265SDimitry Andric         ReplaceNode(Node, SLLI);
74081ad6265SDimitry Andric         return;
74181ad6265SDimitry Andric       }
74281ad6265SDimitry Andric     }
74381ad6265SDimitry Andric     break;
74481ad6265SDimitry Andric   }
74581ad6265SDimitry Andric   case ISD::SRL: {
74681ad6265SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
74781ad6265SDimitry Andric     if (!N1C)
74881ad6265SDimitry Andric       break;
74981ad6265SDimitry Andric     SDValue N0 = Node->getOperand(0);
750*bdd1243dSDimitry Andric     if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
75181ad6265SDimitry Andric       break;
75281ad6265SDimitry Andric     unsigned ShAmt = N1C->getZExtValue();
75381ad6265SDimitry Andric     uint64_t Mask = N0.getConstantOperandVal(1);
75481ad6265SDimitry Andric 
75581ad6265SDimitry Andric     // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
75681ad6265SDimitry Andric     // 32 leading zeros and C3 trailing zeros.
757*bdd1243dSDimitry Andric     if (isShiftedMask_64(Mask) && N0.hasOneUse()) {
75881ad6265SDimitry Andric       unsigned XLen = Subtarget->getXLen();
759*bdd1243dSDimitry Andric       unsigned LeadingZeros = XLen - llvm::bit_width(Mask);
76081ad6265SDimitry Andric       unsigned TrailingZeros = countTrailingZeros(Mask);
76181ad6265SDimitry Andric       if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
76281ad6265SDimitry Andric         SDNode *SRLIW = CurDAG->getMachineNode(
76381ad6265SDimitry Andric             RISCV::SRLIW, DL, VT, N0->getOperand(0),
76481ad6265SDimitry Andric             CurDAG->getTargetConstant(TrailingZeros, DL, VT));
76581ad6265SDimitry Andric         SDNode *SLLI = CurDAG->getMachineNode(
76681ad6265SDimitry Andric             RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
76781ad6265SDimitry Andric             CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
76881ad6265SDimitry Andric         ReplaceNode(Node, SLLI);
76981ad6265SDimitry Andric         return;
77081ad6265SDimitry Andric       }
77181ad6265SDimitry Andric     }
77281ad6265SDimitry Andric 
77381ad6265SDimitry Andric     // Optimize (srl (and X, C2), C) ->
77481ad6265SDimitry Andric     //          (srli (slli X, (XLen-C3)), (XLen-C3) + C)
77581ad6265SDimitry Andric     // Where C2 is a mask with C3 trailing ones.
77681ad6265SDimitry Andric     // Taking into account that the C2 may have had lower bits unset by
77781ad6265SDimitry Andric     // SimplifyDemandedBits. This avoids materializing the C2 immediate.
77881ad6265SDimitry Andric     // This pattern occurs when type legalizing right shifts for types with
77981ad6265SDimitry Andric     // less than XLen bits.
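    // For example, with XLen=64, (srl (and X, 0xff), 4) has C3=8 and becomes
    // (srli (slli X, 56), 60), assuming the AND has a single use.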
780fe6060f1SDimitry Andric     Mask |= maskTrailingOnes<uint64_t>(ShAmt);
78104eeddc0SDimitry Andric     if (!isMask_64(Mask))
78204eeddc0SDimitry Andric       break;
78304eeddc0SDimitry Andric     unsigned TrailingOnes = countTrailingOnes(Mask);
784*bdd1243dSDimitry Andric     if (ShAmt >= TrailingOnes)
78504eeddc0SDimitry Andric       break;
786*bdd1243dSDimitry Andric     // If the mask has 32 trailing ones, use SRLIW.
787*bdd1243dSDimitry Andric     if (TrailingOnes == 32) {
788*bdd1243dSDimitry Andric       SDNode *SRLIW =
789*bdd1243dSDimitry Andric           CurDAG->getMachineNode(RISCV::SRLIW, DL, VT, N0->getOperand(0),
790*bdd1243dSDimitry Andric                                  CurDAG->getTargetConstant(ShAmt, DL, VT));
791*bdd1243dSDimitry Andric       ReplaceNode(Node, SRLIW);
792*bdd1243dSDimitry Andric       return;
793*bdd1243dSDimitry Andric     }
794*bdd1243dSDimitry Andric 
795*bdd1243dSDimitry Andric     // Only do the remaining transforms if the shift has one use.
796*bdd1243dSDimitry Andric     if (!N0.hasOneUse())
797*bdd1243dSDimitry Andric       break;
798*bdd1243dSDimitry Andric 
799fcaf7f86SDimitry Andric     // If C2 is (1 << ShAmt), use bexti if possible.
800fcaf7f86SDimitry Andric     if (Subtarget->hasStdExtZbs() && ShAmt + 1 == TrailingOnes) {
801fcaf7f86SDimitry Andric       SDNode *BEXTI =
802fcaf7f86SDimitry Andric           CurDAG->getMachineNode(RISCV::BEXTI, DL, VT, N0->getOperand(0),
803fcaf7f86SDimitry Andric                                  CurDAG->getTargetConstant(ShAmt, DL, VT));
804fcaf7f86SDimitry Andric       ReplaceNode(Node, BEXTI);
805fcaf7f86SDimitry Andric       return;
806fcaf7f86SDimitry Andric     }
80704eeddc0SDimitry Andric     unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
808fe6060f1SDimitry Andric     SDNode *SLLI =
809fe6060f1SDimitry Andric         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
810fe6060f1SDimitry Andric                                CurDAG->getTargetConstant(LShAmt, DL, VT));
811fe6060f1SDimitry Andric     SDNode *SRLI = CurDAG->getMachineNode(
812fe6060f1SDimitry Andric         RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
813fe6060f1SDimitry Andric         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
814fe6060f1SDimitry Andric     ReplaceNode(Node, SRLI);
815fe6060f1SDimitry Andric     return;
816fe6060f1SDimitry Andric   }
81704eeddc0SDimitry Andric   case ISD::SRA: {
81804eeddc0SDimitry Andric     // Optimize (sra (sext_inreg X, i16), C) ->
81904eeddc0SDimitry Andric     //          (srai (slli X, (XLen-16)), (XLen-16) + C)
82004eeddc0SDimitry Andric     // And      (sra (sext_inreg X, i8), C) ->
82104eeddc0SDimitry Andric     //          (srai (slli X, (XLen-8)), (XLen-8) + C)
82204eeddc0SDimitry Andric     // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
82304eeddc0SDimitry Andric     // This transform matches the code we get without Zbb. The shifts are more
82404eeddc0SDimitry Andric     // compressible, and this can help expose CSE opportunities in the sdiv by
82504eeddc0SDimitry Andric     // constant optimization.
82604eeddc0SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
82704eeddc0SDimitry Andric     if (!N1C)
828fe6060f1SDimitry Andric       break;
82904eeddc0SDimitry Andric     SDValue N0 = Node->getOperand(0);
83004eeddc0SDimitry Andric     if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
83104eeddc0SDimitry Andric       break;
83204eeddc0SDimitry Andric     unsigned ShAmt = N1C->getZExtValue();
83304eeddc0SDimitry Andric     unsigned ExtSize =
83404eeddc0SDimitry Andric         cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
83504eeddc0SDimitry Andric     // ExtSize of 32 should use sraiw via tablegen pattern.
83604eeddc0SDimitry Andric     if (ExtSize >= 32 || ShAmt >= ExtSize)
83704eeddc0SDimitry Andric       break;
83804eeddc0SDimitry Andric     unsigned LShAmt = Subtarget->getXLen() - ExtSize;
83904eeddc0SDimitry Andric     SDNode *SLLI =
84004eeddc0SDimitry Andric         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
84104eeddc0SDimitry Andric                                CurDAG->getTargetConstant(LShAmt, DL, VT));
84204eeddc0SDimitry Andric     SDNode *SRAI = CurDAG->getMachineNode(
84304eeddc0SDimitry Andric         RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
84404eeddc0SDimitry Andric         CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
84504eeddc0SDimitry Andric     ReplaceNode(Node, SRAI);
84604eeddc0SDimitry Andric     return;
847fe6060f1SDimitry Andric   }
848*bdd1243dSDimitry Andric   case ISD::OR:
849*bdd1243dSDimitry Andric   case ISD::XOR:
850*bdd1243dSDimitry Andric     if (tryShrinkShlLogicImm(Node))
851*bdd1243dSDimitry Andric       return;
852*bdd1243dSDimitry Andric 
853*bdd1243dSDimitry Andric     break;
854fe6060f1SDimitry Andric   case ISD::AND: {
855fe6060f1SDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
856fe6060f1SDimitry Andric     if (!N1C)
857fe6060f1SDimitry Andric       break;
858fe6060f1SDimitry Andric 
859fe6060f1SDimitry Andric     SDValue N0 = Node->getOperand(0);
860fe6060f1SDimitry Andric 
861fe6060f1SDimitry Andric     bool LeftShift = N0.getOpcode() == ISD::SHL;
862*bdd1243dSDimitry Andric     if (LeftShift || N0.getOpcode() == ISD::SRL) {
863fe6060f1SDimitry Andric       auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
864fe6060f1SDimitry Andric       if (!C)
865fe6060f1SDimitry Andric         break;
866753f127fSDimitry Andric       unsigned C2 = C->getZExtValue();
867fe6060f1SDimitry Andric       unsigned XLen = Subtarget->getXLen();
868753f127fSDimitry Andric       assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");
869fe6060f1SDimitry Andric 
870fe6060f1SDimitry Andric       uint64_t C1 = N1C->getZExtValue();
871fe6060f1SDimitry Andric 
87281ad6265SDimitry Andric       // Keep track of whether this is a c.andi. If we can't use c.andi, the
87381ad6265SDimitry Andric       // shift pair might offer more compression opportunities.
87481ad6265SDimitry Andric       // TODO: We could check for C extension here, but we don't have many lit
875*bdd1243dSDimitry Andric       // tests with the C extension enabled, so not checking gets better
876*bdd1243dSDimitry Andric       // coverage.
87781ad6265SDimitry Andric       // TODO: What if ANDI is faster than the shift?
87881ad6265SDimitry Andric       bool IsCANDI = isInt<6>(N1C->getSExtValue());
879fe6060f1SDimitry Andric 
880fe6060f1SDimitry Andric       // Clear irrelevant bits in the mask.
881fe6060f1SDimitry Andric       if (LeftShift)
882fe6060f1SDimitry Andric         C1 &= maskTrailingZeros<uint64_t>(C2);
883fe6060f1SDimitry Andric       else
884fe6060f1SDimitry Andric         C1 &= maskTrailingOnes<uint64_t>(XLen - C2);
885fe6060f1SDimitry Andric 
886fe6060f1SDimitry Andric       // Some transforms should only be done if the shift has a single use or
887fe6060f1SDimitry Andric       // the AND would become (srli (slli X, 32), 32)
888fe6060f1SDimitry Andric       bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);
889fe6060f1SDimitry Andric 
890fe6060f1SDimitry Andric       SDValue X = N0.getOperand(0);
891fe6060f1SDimitry Andric 
892fe6060f1SDimitry Andric       // Turn (and (srl x, c2) c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
893fe6060f1SDimitry Andric       // with c3 leading zeros.
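      // For example, with XLen=64 and a single-use shift, c2=8 and c1=0xffff
      // give c3=48, so the pattern becomes (srli (slli x, 40), 48).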
894fe6060f1SDimitry Andric       if (!LeftShift && isMask_64(C1)) {
895*bdd1243dSDimitry Andric         unsigned Leading = XLen - llvm::bit_width(C1);
896753f127fSDimitry Andric         if (C2 < Leading) {
897fe6060f1SDimitry Andric           // If the number of leading zeros is C2+32 this can be SRLIW.
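          // e.g. (and (srl x, 2), 0x3fffffff) becomes (srliw x, 2): SRLIW only
          // reads the low 32 bits of x and zero-fills from bit 30 upward,
          // which matches the mask.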
898753f127fSDimitry Andric           if (C2 + 32 == Leading) {
89981ad6265SDimitry Andric             SDNode *SRLIW = CurDAG->getMachineNode(
90081ad6265SDimitry Andric                 RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
901fe6060f1SDimitry Andric             ReplaceNode(Node, SRLIW);
902fe6060f1SDimitry Andric             return;
903fe6060f1SDimitry Andric           }
904fe6060f1SDimitry Andric 
905*bdd1243dSDimitry Andric           // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32)
906*bdd1243dSDimitry Andric           // if c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
907fe6060f1SDimitry Andric           //
908fe6060f1SDimitry Andric           // This pattern occurs when (i32 (srl (sra Y, 31), c3 - 32)) is type
909fe6060f1SDimitry Andric           // legalized and goes through DAG combine.
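          // For example, with c2 == 32 and c1 == 0x7fffffff (c3 == 33), both
          // sides reduce to replicating bit 31 of Y into bits 0..30:
          //   (and (srl (sexti32 Y), 32), 0x7fffffff)
          //     -> (srliw (sraiw Y, 31), 1)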
910753f127fSDimitry Andric           if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
91181ad6265SDimitry Andric               X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
91281ad6265SDimitry Andric               cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
913fe6060f1SDimitry Andric             SDNode *SRAIW =
91481ad6265SDimitry Andric                 CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
91581ad6265SDimitry Andric                                        CurDAG->getTargetConstant(31, DL, VT));
916fe6060f1SDimitry Andric             SDNode *SRLIW = CurDAG->getMachineNode(
91781ad6265SDimitry Andric                 RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
918753f127fSDimitry Andric                 CurDAG->getTargetConstant(Leading - 32, DL, VT));
919fe6060f1SDimitry Andric             ReplaceNode(Node, SRLIW);
920fe6060f1SDimitry Andric             return;
921fe6060f1SDimitry Andric           }
922fe6060f1SDimitry Andric 
923fe6060f1SDimitry Andric           // (srli (slli x, c3-c2), c3).
92481ad6265SDimitry Andric           // Skip if we could use (zext.w (sraiw X, C2)).
925753f127fSDimitry Andric           bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
92681ad6265SDimitry Andric                       X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
92781ad6265SDimitry Andric                       cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
92881ad6265SDimitry Andric           // Also Skip if we can use bexti.
929753f127fSDimitry Andric           Skip |= Subtarget->hasStdExtZbs() && Leading == XLen - 1;
93081ad6265SDimitry Andric           if (OneUseOrZExtW && !Skip) {
931fe6060f1SDimitry Andric             SDNode *SLLI = CurDAG->getMachineNode(
93281ad6265SDimitry Andric                 RISCV::SLLI, DL, VT, X,
933753f127fSDimitry Andric                 CurDAG->getTargetConstant(Leading - C2, DL, VT));
934753f127fSDimitry Andric             SDNode *SRLI = CurDAG->getMachineNode(
935753f127fSDimitry Andric                 RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
936753f127fSDimitry Andric                 CurDAG->getTargetConstant(Leading, DL, VT));
937fe6060f1SDimitry Andric             ReplaceNode(Node, SRLI);
938fe6060f1SDimitry Andric             return;
939fe6060f1SDimitry Andric           }
940fe6060f1SDimitry Andric         }
941fe6060f1SDimitry Andric       }
942fe6060f1SDimitry Andric 
943349cc55cSDimitry Andric       // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
944fe6060f1SDimitry Andric       // shifted by c2 bits with c3 leading zeros.
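      // For example (RV64), c2 == 4 and c1 == 0x0ffffffffffffff0 (c3 == 4)
      // gives (and (shl x, 4), c1) -> (srli (slli x, 8), 4); both forms keep
      // bits 0..55 of x in bits 4..59 of the result.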
945fe6060f1SDimitry Andric       if (LeftShift && isShiftedMask_64(C1)) {
946*bdd1243dSDimitry Andric         unsigned Leading = XLen - llvm::bit_width(C1);
947fe6060f1SDimitry Andric 
948753f127fSDimitry Andric         if (C2 + Leading < XLen &&
949753f127fSDimitry Andric             C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
950fe6060f1SDimitry Andric           // Use slli.uw when possible.
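          // e.g. (and (shl x, 4), 0xffffffff0) -> (slli.uw x, 4), since
          // slli.uw zero-extends the low 32 bits of x before shifting.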
951753f127fSDimitry Andric           if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
952*bdd1243dSDimitry Andric             SDNode *SLLI_UW =
953*bdd1243dSDimitry Andric                 CurDAG->getMachineNode(RISCV::SLLI_UW, DL, VT, X,
954*bdd1243dSDimitry Andric                                        CurDAG->getTargetConstant(C2, DL, VT));
9551fd87a68SDimitry Andric             ReplaceNode(Node, SLLI_UW);
956fe6060f1SDimitry Andric             return;
957fe6060f1SDimitry Andric           }
958fe6060f1SDimitry Andric 
959fe6060f1SDimitry Andric           // (srli (slli x, c2+c3), c3)
96081ad6265SDimitry Andric           if (OneUseOrZExtW && !IsCANDI) {
961fe6060f1SDimitry Andric             SDNode *SLLI = CurDAG->getMachineNode(
96281ad6265SDimitry Andric                 RISCV::SLLI, DL, VT, X,
963753f127fSDimitry Andric                 CurDAG->getTargetConstant(C2 + Leading, DL, VT));
964753f127fSDimitry Andric             SDNode *SRLI = CurDAG->getMachineNode(
965753f127fSDimitry Andric                 RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
966753f127fSDimitry Andric                 CurDAG->getTargetConstant(Leading, DL, VT));
967fe6060f1SDimitry Andric             ReplaceNode(Node, SRLI);
968fe6060f1SDimitry Andric             return;
969fe6060f1SDimitry Andric           }
970fe6060f1SDimitry Andric         }
971fe6060f1SDimitry Andric       }
972fe6060f1SDimitry Andric 
973349cc55cSDimitry Andric       // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
974349cc55cSDimitry Andric       // shifted mask with c2 leading zeros and c3 trailing zeros.
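      // For example (RV64), c2 == 8 and c1 == 0x00ffffffffffff00 (c3 == 8)
      // gives (and (srl x, 8), c1) -> (slli (srli x, 16), 8); both forms keep
      // bits 16..63 of x in bits 8..55 of the result.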
975349cc55cSDimitry Andric       if (!LeftShift && isShiftedMask_64(C1)) {
976*bdd1243dSDimitry Andric         unsigned Leading = XLen - llvm::bit_width(C1);
977753f127fSDimitry Andric         unsigned Trailing = countTrailingZeros(C1);
978*bdd1243dSDimitry Andric         if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW &&
979*bdd1243dSDimitry Andric             !IsCANDI) {
98081ad6265SDimitry Andric           unsigned SrliOpc = RISCV::SRLI;
98181ad6265SDimitry Andric           // If the input is zexti32 we should use SRLIW.
982*bdd1243dSDimitry Andric           if (X.getOpcode() == ISD::AND &&
983*bdd1243dSDimitry Andric               isa<ConstantSDNode>(X.getOperand(1)) &&
98481ad6265SDimitry Andric               X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
98581ad6265SDimitry Andric             SrliOpc = RISCV::SRLIW;
98681ad6265SDimitry Andric             X = X.getOperand(0);
98781ad6265SDimitry Andric           }
988349cc55cSDimitry Andric           SDNode *SRLI = CurDAG->getMachineNode(
989753f127fSDimitry Andric               SrliOpc, DL, VT, X,
990753f127fSDimitry Andric               CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
991*bdd1243dSDimitry Andric           SDNode *SLLI = CurDAG->getMachineNode(
992*bdd1243dSDimitry Andric               RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
993753f127fSDimitry Andric               CurDAG->getTargetConstant(Trailing, DL, VT));
994349cc55cSDimitry Andric           ReplaceNode(Node, SLLI);
995349cc55cSDimitry Andric           return;
996349cc55cSDimitry Andric         }
997349cc55cSDimitry Andric         // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
998753f127fSDimitry Andric         if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
99981ad6265SDimitry Andric             OneUseOrZExtW && !IsCANDI) {
1000753f127fSDimitry Andric           SDNode *SRLIW = CurDAG->getMachineNode(
1001753f127fSDimitry Andric               RISCV::SRLIW, DL, VT, X,
1002753f127fSDimitry Andric               CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
1003*bdd1243dSDimitry Andric           SDNode *SLLI = CurDAG->getMachineNode(
1004*bdd1243dSDimitry Andric               RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1005753f127fSDimitry Andric               CurDAG->getTargetConstant(Trailing, DL, VT));
1006349cc55cSDimitry Andric           ReplaceNode(Node, SLLI);
1007349cc55cSDimitry Andric           return;
1008349cc55cSDimitry Andric         }
1009349cc55cSDimitry Andric       }
1010349cc55cSDimitry Andric 
1011349cc55cSDimitry Andric       // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
1012349cc55cSDimitry Andric       // shifted mask with no leading zeros and c3 trailing zeros.
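      // For example (RV64), c2 == 4 and c1 == 0xffffffffffffff00 (c3 == 8)
      // gives (and (shl x, 4), c1) -> (slli (srli x, 4), 8); both forms keep
      // bits 4..59 of x in bits 8..63 of the result.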
1013349cc55cSDimitry Andric       if (LeftShift && isShiftedMask_64(C1)) {
1014*bdd1243dSDimitry Andric         unsigned Leading = XLen - llvm::bit_width(C1);
1015753f127fSDimitry Andric         unsigned Trailing = countTrailingZeros(C1);
1016753f127fSDimitry Andric         if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
1017349cc55cSDimitry Andric           SDNode *SRLI = CurDAG->getMachineNode(
1018753f127fSDimitry Andric               RISCV::SRLI, DL, VT, X,
1019753f127fSDimitry Andric               CurDAG->getTargetConstant(Trailing - C2, DL, VT));
1020*bdd1243dSDimitry Andric           SDNode *SLLI = CurDAG->getMachineNode(
1021*bdd1243dSDimitry Andric               RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
1022753f127fSDimitry Andric               CurDAG->getTargetConstant(Trailing, DL, VT));
1023349cc55cSDimitry Andric           ReplaceNode(Node, SLLI);
1024349cc55cSDimitry Andric           return;
1025349cc55cSDimitry Andric         }
1026349cc55cSDimitry Andric         // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
1027753f127fSDimitry Andric         if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
1028753f127fSDimitry Andric           SDNode *SRLIW = CurDAG->getMachineNode(
1029753f127fSDimitry Andric               RISCV::SRLIW, DL, VT, X,
1030753f127fSDimitry Andric               CurDAG->getTargetConstant(Trailing - C2, DL, VT));
1031*bdd1243dSDimitry Andric           SDNode *SLLI = CurDAG->getMachineNode(
1032*bdd1243dSDimitry Andric               RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
1033753f127fSDimitry Andric               CurDAG->getTargetConstant(Trailing, DL, VT));
1034349cc55cSDimitry Andric           ReplaceNode(Node, SLLI);
1035349cc55cSDimitry Andric           return;
1036349cc55cSDimitry Andric         }
1037349cc55cSDimitry Andric       }
1038*bdd1243dSDimitry Andric     }
1039*bdd1243dSDimitry Andric 
1040*bdd1243dSDimitry Andric     if (tryShrinkShlLogicImm(Node))
1041*bdd1243dSDimitry Andric       return;
1042349cc55cSDimitry Andric 
1043fe6060f1SDimitry Andric     break;
1044fe6060f1SDimitry Andric   }
10450eae32dcSDimitry Andric   case ISD::MUL: {
10460eae32dcSDimitry Andric     // Special case for calculating (mul (and X, C2), C1) where the full product
10470eae32dcSDimitry Andric     // fits in XLen bits. We can shift X left by the number of leading zeros in
10480eae32dcSDimitry Andric     // C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the final
10490eae32dcSDimitry Andric     // product has XLen trailing zeros, putting it in the output of MULHU. This
10500eae32dcSDimitry Andric     // can avoid materializing a constant in a register for C2.
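    // For example, with XLen == 64, (mul (and X, 0xff), 100) becomes
    // (mulhu (slli X, 56), 100 << 8): the full product is
    // (X & 0xff) * 100 * 2^64, so its high XLen bits are exactly
    // (X & 0xff) * 100.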
10510eae32dcSDimitry Andric 
10520eae32dcSDimitry Andric     // RHS should be a constant.
10530eae32dcSDimitry Andric     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
10540eae32dcSDimitry Andric     if (!N1C || !N1C->hasOneUse())
10550eae32dcSDimitry Andric       break;
10560eae32dcSDimitry Andric 
10570eae32dcSDimitry Andric     // LHS should be an AND with constant.
10580eae32dcSDimitry Andric     SDValue N0 = Node->getOperand(0);
10590eae32dcSDimitry Andric     if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
10600eae32dcSDimitry Andric       break;
10610eae32dcSDimitry Andric 
10620eae32dcSDimitry Andric     uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
10630eae32dcSDimitry Andric 
10640eae32dcSDimitry Andric     // Constant should be a mask.
10650eae32dcSDimitry Andric     if (!isMask_64(C2))
10660eae32dcSDimitry Andric       break;
10670eae32dcSDimitry Andric 
1068fcaf7f86SDimitry Andric     // If this can be an ANDI, ZEXT.H or ZEXT.W, don't do this if the ANDI/ZEXT
1069fcaf7f86SDimitry Andric     // has multiple users or the constant is a simm12. This prevents inserting
1070fcaf7f86SDimitry Andric     // a shift while still having uses of the AND/ZEXT. Shifting a simm12 will
1071fcaf7f86SDimitry Andric     // likely make it more costly to materialize. Otherwise, using a SLLI
1072fcaf7f86SDimitry Andric     // might allow it to be compressed.
1073fcaf7f86SDimitry Andric     bool IsANDIOrZExt =
1074fcaf7f86SDimitry Andric         isInt<12>(C2) ||
1075*bdd1243dSDimitry Andric         (C2 == UINT64_C(0xFFFF) && Subtarget->hasStdExtZbb()) ||
1076fcaf7f86SDimitry Andric         (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba());
1077fcaf7f86SDimitry Andric     if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
10780eae32dcSDimitry Andric       break;
10790eae32dcSDimitry Andric 
10800eae32dcSDimitry Andric     // We need to shift left the AND input and C1 by a total of XLen bits.
10810eae32dcSDimitry Andric 
10820eae32dcSDimitry Andric     // How far left do we need to shift the AND input?
10830eae32dcSDimitry Andric     unsigned XLen = Subtarget->getXLen();
1084*bdd1243dSDimitry Andric     unsigned LeadingZeros = XLen - llvm::bit_width(C2);
10850eae32dcSDimitry Andric 
10860eae32dcSDimitry Andric     // The constant gets shifted by the remaining amount unless that would
10870eae32dcSDimitry Andric     // shift bits out.
10880eae32dcSDimitry Andric     uint64_t C1 = N1C->getZExtValue();
10890eae32dcSDimitry Andric     unsigned ConstantShift = XLen - LeadingZeros;
1090*bdd1243dSDimitry Andric     if (ConstantShift > (XLen - llvm::bit_width(C1)))
10910eae32dcSDimitry Andric       break;
10920eae32dcSDimitry Andric 
10930eae32dcSDimitry Andric     uint64_t ShiftedC1 = C1 << ConstantShift;
10940eae32dcSDimitry Andric     // If this is RV32, we need to sign extend the constant.
10950eae32dcSDimitry Andric     if (XLen == 32)
109681ad6265SDimitry Andric       ShiftedC1 = SignExtend64<32>(ShiftedC1);
10970eae32dcSDimitry Andric 
10980eae32dcSDimitry Andric     // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
109904eeddc0SDimitry Andric     SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
11000eae32dcSDimitry Andric     SDNode *SLLI =
11010eae32dcSDimitry Andric         CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
11020eae32dcSDimitry Andric                                CurDAG->getTargetConstant(LeadingZeros, DL, VT));
11030eae32dcSDimitry Andric     SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
11040eae32dcSDimitry Andric                                            SDValue(SLLI, 0), SDValue(Imm, 0));
11050eae32dcSDimitry Andric     ReplaceNode(Node, MULHU);
11060eae32dcSDimitry Andric     return;
11070eae32dcSDimitry Andric   }
1108fe6060f1SDimitry Andric   case ISD::INTRINSIC_WO_CHAIN: {
1109fe6060f1SDimitry Andric     unsigned IntNo = Node->getConstantOperandVal(0);
1110fe6060f1SDimitry Andric     switch (IntNo) {
1111fe6060f1SDimitry Andric       // By default we do not custom select any intrinsic.
1112fe6060f1SDimitry Andric     default:
1113fe6060f1SDimitry Andric       break;
1114fe6060f1SDimitry Andric     case Intrinsic::riscv_vmsgeu:
1115fe6060f1SDimitry Andric     case Intrinsic::riscv_vmsge: {
1116fe6060f1SDimitry Andric       SDValue Src1 = Node->getOperand(1);
1117fe6060f1SDimitry Andric       SDValue Src2 = Node->getOperand(2);
111804eeddc0SDimitry Andric       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
111904eeddc0SDimitry Andric       bool IsCmpUnsignedZero = false;
1120fe6060f1SDimitry Andric       // Only custom select scalar second operand.
1121fe6060f1SDimitry Andric       if (Src2.getValueType() != XLenVT)
1122fe6060f1SDimitry Andric         break;
1123fe6060f1SDimitry Andric       // Small constants are handled with patterns.
1124fe6060f1SDimitry Andric       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
1125fe6060f1SDimitry Andric         int64_t CVal = C->getSExtValue();
112604eeddc0SDimitry Andric         if (CVal >= -15 && CVal <= 16) {
112704eeddc0SDimitry Andric           if (!IsUnsigned || CVal != 0)
1128fe6060f1SDimitry Andric             break;
112904eeddc0SDimitry Andric           IsCmpUnsignedZero = true;
1130fe6060f1SDimitry Andric         }
113104eeddc0SDimitry Andric       }
1132fe6060f1SDimitry Andric       MVT Src1VT = Src1.getSimpleValueType();
113304eeddc0SDimitry Andric       unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
1134fe6060f1SDimitry Andric       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
1135fe6060f1SDimitry Andric       default:
1136fe6060f1SDimitry Andric         llvm_unreachable("Unexpected LMUL!");
113704eeddc0SDimitry Andric #define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
113804eeddc0SDimitry Andric   case RISCVII::VLMUL::lmulenum:                                               \
113904eeddc0SDimitry Andric     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
114004eeddc0SDimitry Andric                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
114104eeddc0SDimitry Andric     VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
114204eeddc0SDimitry Andric     VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
1143fe6060f1SDimitry Andric     break;
114404eeddc0SDimitry Andric         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
114504eeddc0SDimitry Andric         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
114604eeddc0SDimitry Andric         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
114704eeddc0SDimitry Andric         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
114804eeddc0SDimitry Andric         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
114904eeddc0SDimitry Andric         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
115004eeddc0SDimitry Andric         CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
115104eeddc0SDimitry Andric #undef CASE_VMSLT_VMNAND_VMSET_OPCODES
1152fe6060f1SDimitry Andric       }
1153fe6060f1SDimitry Andric       SDValue SEW = CurDAG->getTargetConstant(
1154fe6060f1SDimitry Andric           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
1155fe6060f1SDimitry Andric       SDValue VL;
1156fe6060f1SDimitry Andric       selectVLOp(Node->getOperand(3), VL);
1157fe6060f1SDimitry Andric 
115804eeddc0SDimitry Andric       // If vmsgeu with 0 immediate, expand it to vmset.
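      // An unsigned comparison against 0 is always true, so the result is an
      // all-ones mask for the first VL elements.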
115904eeddc0SDimitry Andric       if (IsCmpUnsignedZero) {
116004eeddc0SDimitry Andric         ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
116104eeddc0SDimitry Andric         return;
116204eeddc0SDimitry Andric       }
116304eeddc0SDimitry Andric 
1164fe6060f1SDimitry Andric       // Expand to
1165fe6060f1SDimitry Andric       // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
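      // vmsge(u) vd, va, x is !(va < x), and vmnand.mm vd, vd, vd computes
      // ~vd, i.e. the bitwise NOT of the compare result.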
1166fe6060f1SDimitry Andric       SDValue Cmp = SDValue(
1167fe6060f1SDimitry Andric           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
1168fe6060f1SDimitry Andric           0);
1169fe6060f1SDimitry Andric       ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
1170fe6060f1SDimitry Andric                                                {Cmp, Cmp, VL, SEW}));
1171fe6060f1SDimitry Andric       return;
1172fe6060f1SDimitry Andric     }
1173fe6060f1SDimitry Andric     case Intrinsic::riscv_vmsgeu_mask:
1174fe6060f1SDimitry Andric     case Intrinsic::riscv_vmsge_mask: {
1175fe6060f1SDimitry Andric       SDValue Src1 = Node->getOperand(2);
1176fe6060f1SDimitry Andric       SDValue Src2 = Node->getOperand(3);
117704eeddc0SDimitry Andric       bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
117804eeddc0SDimitry Andric       bool IsCmpUnsignedZero = false;
1179fe6060f1SDimitry Andric       // Only custom select scalar second operand.
1180fe6060f1SDimitry Andric       if (Src2.getValueType() != XLenVT)
1181fe6060f1SDimitry Andric         break;
1182fe6060f1SDimitry Andric       // Small constants are handled with patterns.
1183fe6060f1SDimitry Andric       if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
1184fe6060f1SDimitry Andric         int64_t CVal = C->getSExtValue();
118504eeddc0SDimitry Andric         if (CVal >= -15 && CVal <= 16) {
118604eeddc0SDimitry Andric           if (!IsUnsigned || CVal != 0)
1187fe6060f1SDimitry Andric             break;
118804eeddc0SDimitry Andric           IsCmpUnsignedZero = true;
1189fe6060f1SDimitry Andric         }
119004eeddc0SDimitry Andric       }
1191fe6060f1SDimitry Andric       MVT Src1VT = Src1.getSimpleValueType();
119204eeddc0SDimitry Andric       unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
119381ad6265SDimitry Andric           VMOROpcode;
1194fe6060f1SDimitry Andric       switch (RISCVTargetLowering::getLMUL(Src1VT)) {
1195fe6060f1SDimitry Andric       default:
1196fe6060f1SDimitry Andric         llvm_unreachable("Unexpected LMUL!");
119781ad6265SDimitry Andric #define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
119804eeddc0SDimitry Andric   case RISCVII::VLMUL::lmulenum:                                               \
119904eeddc0SDimitry Andric     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
120004eeddc0SDimitry Andric                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
120104eeddc0SDimitry Andric     VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
120204eeddc0SDimitry Andric                                  : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
1203fe6060f1SDimitry Andric     break;
120481ad6265SDimitry Andric         CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
120581ad6265SDimitry Andric         CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
120681ad6265SDimitry Andric         CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
120781ad6265SDimitry Andric         CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
120881ad6265SDimitry Andric         CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
120981ad6265SDimitry Andric         CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
121081ad6265SDimitry Andric         CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
121181ad6265SDimitry Andric #undef CASE_VMSLT_OPCODES
1212fe6060f1SDimitry Andric       }
1213fe6060f1SDimitry Andric       // Mask operations use the LMUL from the mask type.
1214fe6060f1SDimitry Andric       switch (RISCVTargetLowering::getLMUL(VT)) {
1215fe6060f1SDimitry Andric       default:
1216fe6060f1SDimitry Andric         llvm_unreachable("Unexpected LMUL!");
121781ad6265SDimitry Andric #define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
121804eeddc0SDimitry Andric   case RISCVII::VLMUL::lmulenum:                                               \
121904eeddc0SDimitry Andric     VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
122004eeddc0SDimitry Andric     VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
122181ad6265SDimitry Andric     VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
1222fe6060f1SDimitry Andric     break;
122381ad6265SDimitry Andric         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
122481ad6265SDimitry Andric         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
122581ad6265SDimitry Andric         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
122681ad6265SDimitry Andric         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
122781ad6265SDimitry Andric         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
122881ad6265SDimitry Andric         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
122981ad6265SDimitry Andric         CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
123081ad6265SDimitry Andric #undef CASE_VMXOR_VMANDN_VMOR_OPCODES
1231fe6060f1SDimitry Andric       }
1232fe6060f1SDimitry Andric       SDValue SEW = CurDAG->getTargetConstant(
1233fe6060f1SDimitry Andric           Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
1234fe6060f1SDimitry Andric       SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
1235fe6060f1SDimitry Andric       SDValue VL;
1236fe6060f1SDimitry Andric       selectVLOp(Node->getOperand(5), VL);
1237fe6060f1SDimitry Andric       SDValue MaskedOff = Node->getOperand(1);
1238fe6060f1SDimitry Andric       SDValue Mask = Node->getOperand(4);
123904eeddc0SDimitry Andric 
124081ad6265SDimitry Andric       // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
124104eeddc0SDimitry Andric       if (IsCmpUnsignedZero) {
124281ad6265SDimitry Andric         // We don't need vmor if the MaskedOff and the Mask are the same
124381ad6265SDimitry Andric         // value.
124481ad6265SDimitry Andric         if (Mask == MaskedOff) {
124581ad6265SDimitry Andric           ReplaceUses(Node, Mask.getNode());
124681ad6265SDimitry Andric           return;
124781ad6265SDimitry Andric         }
124881ad6265SDimitry Andric         ReplaceNode(Node,
124981ad6265SDimitry Andric                     CurDAG->getMachineNode(VMOROpcode, DL, VT,
125081ad6265SDimitry Andric                                            {Mask, MaskedOff, VL, MaskSEW}));
125104eeddc0SDimitry Andric         return;
125204eeddc0SDimitry Andric       }
125304eeddc0SDimitry Andric 
1254fe6060f1SDimitry Andric       // If the MaskedOff value and the Mask are the same value use
1255349cc55cSDimitry Andric       // vmslt{u}.vx vt, va, x;  vmandn.mm vd, vd, vt
1256fe6060f1SDimitry Andric       // This avoids needing to copy v0 to vd before starting the next sequence.
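      // With MaskedOff == Mask the desired result is
      //   Mask[i] ? (va >= x)[i] : Mask[i],
      // which is exactly Mask & ~(va < x), i.e. a single vmandn.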
1257fe6060f1SDimitry Andric       if (Mask == MaskedOff) {
1258fe6060f1SDimitry Andric         SDValue Cmp = SDValue(
1259fe6060f1SDimitry Andric             CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
1260fe6060f1SDimitry Andric             0);
1261349cc55cSDimitry Andric         ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
1262fe6060f1SDimitry Andric                                                  {Mask, Cmp, VL, MaskSEW}));
1263fe6060f1SDimitry Andric         return;
1264fe6060f1SDimitry Andric       }
1265fe6060f1SDimitry Andric 
1266fe6060f1SDimitry Andric       // Mask needs to be copied to V0.
1267fe6060f1SDimitry Andric       SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
1268fe6060f1SDimitry Andric                                            RISCV::V0, Mask, SDValue());
1269fe6060f1SDimitry Andric       SDValue Glue = Chain.getValue(1);
1270fe6060f1SDimitry Andric       SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);
1271fe6060f1SDimitry Andric 
1272fe6060f1SDimitry Andric       // Otherwise use
1273fe6060f1SDimitry Andric       // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
127481ad6265SDimitry Andric       // The result policy is mask undisturbed.
127581ad6265SDimitry Andric       // We use the same instructions to emulate mask-agnostic behavior, because
127681ad6265SDimitry Andric       // an agnostic result is allowed to be either undisturbed or all ones.
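      // The masked vmslt{u} writes (va < x) to the active lanes and keeps
      // MaskedOff in the inactive ones; XORing with the mask then flips only
      // the active lanes, giving (va >= x) there and MaskedOff elsewhere.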
1277fe6060f1SDimitry Andric       SDValue Cmp = SDValue(
1278fe6060f1SDimitry Andric           CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
1279fe6060f1SDimitry Andric                                  {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
1280fe6060f1SDimitry Andric           0);
128181ad6265SDimitry Andric       // vmxor.mm vd, vd, v0 is used to update the active lanes.
1282fe6060f1SDimitry Andric       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
1283fe6060f1SDimitry Andric                                                {Cmp, Mask, VL, MaskSEW}));
1284fe6060f1SDimitry Andric       return;
1285fe6060f1SDimitry Andric     }
128604eeddc0SDimitry Andric     case Intrinsic::riscv_vsetvli_opt:
128704eeddc0SDimitry Andric     case Intrinsic::riscv_vsetvlimax_opt:
128804eeddc0SDimitry Andric       return selectVSETVLI(Node);
1289fe6060f1SDimitry Andric     }
1290fe6060f1SDimitry Andric     break;
1291fe6060f1SDimitry Andric   }
1292e8d8bef9SDimitry Andric   case ISD::INTRINSIC_W_CHAIN: {
1293e8d8bef9SDimitry Andric     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1294e8d8bef9SDimitry Andric     switch (IntNo) {
1295e8d8bef9SDimitry Andric       // By default we do not custom select any intrinsic.
1296e8d8bef9SDimitry Andric     default:
12970b57cec5SDimitry Andric       break;
1298fe6060f1SDimitry Andric     case Intrinsic::riscv_vsetvli:
129904eeddc0SDimitry Andric     case Intrinsic::riscv_vsetvlimax:
130004eeddc0SDimitry Andric       return selectVSETVLI(Node);
1301e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg2:
1302e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg3:
1303e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg4:
1304e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg5:
1305e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg6:
1306e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg7:
1307e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg8: {
1308fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
1309e8d8bef9SDimitry Andric       return;
1310e8d8bef9SDimitry Andric     }
1311e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg2_mask:
1312e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg3_mask:
1313e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg4_mask:
1314e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg5_mask:
1315e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg6_mask:
1316e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg7_mask:
1317e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlseg8_mask: {
1318fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1319e8d8bef9SDimitry Andric       return;
1320e8d8bef9SDimitry Andric     }
1321e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg2:
1322e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg3:
1323e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg4:
1324e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg5:
1325e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg6:
1326e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg7:
1327e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg8: {
1328fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1329e8d8bef9SDimitry Andric       return;
1330e8d8bef9SDimitry Andric     }
1331e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg2_mask:
1332e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg3_mask:
1333e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg4_mask:
1334e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg5_mask:
1335e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg6_mask:
1336e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg7_mask:
1337e8d8bef9SDimitry Andric     case Intrinsic::riscv_vlsseg8_mask: {
1338fe6060f1SDimitry Andric       selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1339e8d8bef9SDimitry Andric       return;
1340e8d8bef9SDimitry Andric     }
1341e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg2:
1342e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg3:
1343e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg4:
1344e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg5:
1345e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg6:
1346e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg7:
1347e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg8:
1348fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1349fe6060f1SDimitry Andric       return;
1350e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg2:
1351e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg3:
1352e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg4:
1353e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg5:
1354e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg6:
1355e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg7:
1356fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxseg8:
1357fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1358e8d8bef9SDimitry Andric       return;
1359e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg2_mask:
1360e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg3_mask:
1361e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg4_mask:
1362e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg5_mask:
1363e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg6_mask:
1364e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg7_mask:
1365e8d8bef9SDimitry Andric     case Intrinsic::riscv_vloxseg8_mask:
1366fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1367fe6060f1SDimitry Andric       return;
1368e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg2_mask:
1369e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg3_mask:
1370e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg4_mask:
1371e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg5_mask:
1372e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg6_mask:
1373e8d8bef9SDimitry Andric     case Intrinsic::riscv_vluxseg7_mask:
1374fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxseg8_mask:
1375fe6060f1SDimitry Andric       selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1376fe6060f1SDimitry Andric       return;
1377fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg8ff:
1378fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg7ff:
1379fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg6ff:
1380fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg5ff:
1381fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg4ff:
1382fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg3ff:
1383fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg2ff: {
1384fe6060f1SDimitry Andric       selectVLSEGFF(Node, /*IsMasked*/ false);
1385fe6060f1SDimitry Andric       return;
1386fe6060f1SDimitry Andric     }
1387fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg8ff_mask:
1388fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg7ff_mask:
1389fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg6ff_mask:
1390fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg5ff_mask:
1391fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg4ff_mask:
1392fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg3ff_mask:
1393fe6060f1SDimitry Andric     case Intrinsic::riscv_vlseg2ff_mask: {
1394fe6060f1SDimitry Andric       selectVLSEGFF(Node, /*IsMasked*/ true);
1395fe6060f1SDimitry Andric       return;
1396fe6060f1SDimitry Andric     }
1397fe6060f1SDimitry Andric     case Intrinsic::riscv_vloxei:
1398fe6060f1SDimitry Andric     case Intrinsic::riscv_vloxei_mask:
1399fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxei:
1400fe6060f1SDimitry Andric     case Intrinsic::riscv_vluxei_mask: {
1401fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
1402fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vluxei_mask;
1403fe6060f1SDimitry Andric       bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
1404fe6060f1SDimitry Andric                        IntNo == Intrinsic::riscv_vloxei_mask;
1405fe6060f1SDimitry Andric 
1406fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
1407fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1408fe6060f1SDimitry Andric 
1409fe6060f1SDimitry Andric       unsigned CurOp = 2;
141004eeddc0SDimitry Andric       // Masked intrinsics only have TU version pseudo instructions.
141181ad6265SDimitry Andric       bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
1412fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
141304eeddc0SDimitry Andric       if (IsTU)
1414fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
141504eeddc0SDimitry Andric       else
141604eeddc0SDimitry Andric         // Skip the undef passthru operand for nomask TA version pseudo
141704eeddc0SDimitry Andric         CurOp++;
1418fe6060f1SDimitry Andric 
1419fe6060f1SDimitry Andric       MVT IndexVT;
1420fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1421fe6060f1SDimitry Andric                                  /*IsStridedOrIndexed*/ true, Operands,
1422349cc55cSDimitry Andric                                  /*IsLoad=*/true, &IndexVT);
1423fe6060f1SDimitry Andric 
1424fe6060f1SDimitry Andric       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1425fe6060f1SDimitry Andric              "Element count mismatch");
1426fe6060f1SDimitry Andric 
1427fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1428fe6060f1SDimitry Andric       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1429fe6060f1SDimitry Andric       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
143004eeddc0SDimitry Andric       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
143104eeddc0SDimitry Andric         report_fatal_error("The V extension does not support EEW=64 for index "
143204eeddc0SDimitry Andric                            "values when XLEN=32");
143304eeddc0SDimitry Andric       }
1434fe6060f1SDimitry Andric       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
143504eeddc0SDimitry Andric           IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
1436fe6060f1SDimitry Andric           static_cast<unsigned>(IndexLMUL));
1437fe6060f1SDimitry Andric       MachineSDNode *Load =
1438fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1439fe6060f1SDimitry Andric 
1440fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1441fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1442fe6060f1SDimitry Andric 
1443fe6060f1SDimitry Andric       ReplaceNode(Node, Load);
1444fe6060f1SDimitry Andric       return;
1445fe6060f1SDimitry Andric     }
1446349cc55cSDimitry Andric     case Intrinsic::riscv_vlm:
1447fe6060f1SDimitry Andric     case Intrinsic::riscv_vle:
1448fe6060f1SDimitry Andric     case Intrinsic::riscv_vle_mask:
1449fe6060f1SDimitry Andric     case Intrinsic::riscv_vlse:
1450fe6060f1SDimitry Andric     case Intrinsic::riscv_vlse_mask: {
1451fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
1452fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vlse_mask;
1453fe6060f1SDimitry Andric       bool IsStrided =
1454fe6060f1SDimitry Andric           IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;
1455fe6060f1SDimitry Andric 
1456fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
1457fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1458fe6060f1SDimitry Andric 
1459fe6060f1SDimitry Andric       unsigned CurOp = 2;
146004eeddc0SDimitry Andric       // The riscv_vlm intrinsic is always tail agnostic and has no passthru operand.
146104eeddc0SDimitry Andric       bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
146204eeddc0SDimitry Andric       // Masked intrinsics only have TU version pseudo instructions.
146381ad6265SDimitry Andric       bool IsTU = HasPassthruOperand &&
146481ad6265SDimitry Andric                   (IsMasked || !Node->getOperand(CurOp).isUndef());
1465fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
146604eeddc0SDimitry Andric       if (IsTU)
1467fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
146804eeddc0SDimitry Andric       else if (HasPassthruOperand)
146904eeddc0SDimitry Andric         // Skip the undef passthru operand for nomask TA version pseudo
147004eeddc0SDimitry Andric         CurOp++;
1471fe6060f1SDimitry Andric 
1472fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1473349cc55cSDimitry Andric                                  Operands, /*IsLoad=*/true);
1474fe6060f1SDimitry Andric 
1475fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1476fe6060f1SDimitry Andric       const RISCV::VLEPseudo *P =
147704eeddc0SDimitry Andric           RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
1478fe6060f1SDimitry Andric                               static_cast<unsigned>(LMUL));
1479fe6060f1SDimitry Andric       MachineSDNode *Load =
1480fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1481fe6060f1SDimitry Andric 
1482fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1483fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1484fe6060f1SDimitry Andric 
1485fe6060f1SDimitry Andric       ReplaceNode(Node, Load);
1486fe6060f1SDimitry Andric       return;
1487fe6060f1SDimitry Andric     }
1488fe6060f1SDimitry Andric     case Intrinsic::riscv_vleff:
1489fe6060f1SDimitry Andric     case Intrinsic::riscv_vleff_mask: {
1490fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
1491fe6060f1SDimitry Andric 
1492fe6060f1SDimitry Andric       MVT VT = Node->getSimpleValueType(0);
1493fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1494fe6060f1SDimitry Andric 
1495fe6060f1SDimitry Andric       unsigned CurOp = 2;
149604eeddc0SDimitry Andric       // Masked intrinsics only have TU version pseudo instructions.
149781ad6265SDimitry Andric       bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
1498fe6060f1SDimitry Andric       SmallVector<SDValue, 7> Operands;
149904eeddc0SDimitry Andric       if (IsTU)
1500fe6060f1SDimitry Andric         Operands.push_back(Node->getOperand(CurOp++));
150104eeddc0SDimitry Andric       else
150204eeddc0SDimitry Andric         // Skip the undef passthru operand for nomask TA version pseudo
150304eeddc0SDimitry Andric         CurOp++;
1504fe6060f1SDimitry Andric 
1505fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1506349cc55cSDimitry Andric                                  /*IsStridedOrIndexed*/ false, Operands,
1507349cc55cSDimitry Andric                                  /*IsLoad=*/true);
1508fe6060f1SDimitry Andric 
1509fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1510fe6060f1SDimitry Andric       const RISCV::VLEPseudo *P =
151104eeddc0SDimitry Andric           RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
151204eeddc0SDimitry Andric                               Log2SEW, static_cast<unsigned>(LMUL));
151381ad6265SDimitry Andric       MachineSDNode *Load = CurDAG->getMachineNode(
151481ad6265SDimitry Andric           P->Pseudo, DL, Node->getVTList(), Operands);
1515fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1516fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
1517fe6060f1SDimitry Andric 
151881ad6265SDimitry Andric       ReplaceNode(Node, Load);
15190b57cec5SDimitry Andric       return;
15200b57cec5SDimitry Andric     }
15210b57cec5SDimitry Andric     }
15220b57cec5SDimitry Andric     break;
15230b57cec5SDimitry Andric   }
1524e8d8bef9SDimitry Andric   case ISD::INTRINSIC_VOID: {
1525e8d8bef9SDimitry Andric     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
1526e8d8bef9SDimitry Andric     switch (IntNo) {
1527e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg2:
1528e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg3:
1529e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg4:
1530e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg5:
1531e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg6:
1532e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg7:
1533e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg8: {
1534fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
15350b57cec5SDimitry Andric       return;
15360b57cec5SDimitry Andric     }
1537e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg2_mask:
1538e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg3_mask:
1539e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg4_mask:
1540e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg5_mask:
1541e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg6_mask:
1542e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg7_mask:
1543e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsseg8_mask: {
1544fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
1545e8d8bef9SDimitry Andric       return;
1546e8d8bef9SDimitry Andric     }
1547e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg2:
1548e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg3:
1549e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg4:
1550e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg5:
1551e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg6:
1552e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg7:
1553e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg8: {
1554fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
1555e8d8bef9SDimitry Andric       return;
1556e8d8bef9SDimitry Andric     }
1557e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg2_mask:
1558e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg3_mask:
1559e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg4_mask:
1560e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg5_mask:
1561e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg6_mask:
1562e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg7_mask:
1563e8d8bef9SDimitry Andric     case Intrinsic::riscv_vssseg8_mask: {
1564fe6060f1SDimitry Andric       selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
1565e8d8bef9SDimitry Andric       return;
1566e8d8bef9SDimitry Andric     }
1567e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg2:
1568e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg3:
1569e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg4:
1570e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg5:
1571e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg6:
1572e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg7:
1573e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg8:
1574fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
1575fe6060f1SDimitry Andric       return;
1576e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg2:
1577e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg3:
1578e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg4:
1579e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg5:
1580e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg6:
1581e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg7:
1582fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxseg8:
1583fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
1584e8d8bef9SDimitry Andric       return;
1585e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg2_mask:
1586e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg3_mask:
1587e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg4_mask:
1588e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg5_mask:
1589e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg6_mask:
1590e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg7_mask:
1591e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsoxseg8_mask:
1592fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
1593fe6060f1SDimitry Andric       return;
1594e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg2_mask:
1595e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg3_mask:
1596e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg4_mask:
1597e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg5_mask:
1598e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg6_mask:
1599e8d8bef9SDimitry Andric     case Intrinsic::riscv_vsuxseg7_mask:
1600fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxseg8_mask:
1601fe6060f1SDimitry Andric       selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
1602fe6060f1SDimitry Andric       return;
1603fe6060f1SDimitry Andric     case Intrinsic::riscv_vsoxei:
1604fe6060f1SDimitry Andric     case Intrinsic::riscv_vsoxei_mask:
1605fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxei:
1606fe6060f1SDimitry Andric     case Intrinsic::riscv_vsuxei_mask: {
1607fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
1608fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vsuxei_mask;
1609fe6060f1SDimitry Andric       bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
1610fe6060f1SDimitry Andric                        IntNo == Intrinsic::riscv_vsoxei_mask;
1611fe6060f1SDimitry Andric 
1612fe6060f1SDimitry Andric       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1613fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1614fe6060f1SDimitry Andric 
1615fe6060f1SDimitry Andric       unsigned CurOp = 2;
1616fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
1617fe6060f1SDimitry Andric       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1618fe6060f1SDimitry Andric 
1619fe6060f1SDimitry Andric       MVT IndexVT;
1620fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
1621fe6060f1SDimitry Andric                                  /*IsStridedOrIndexed*/ true, Operands,
1622349cc55cSDimitry Andric                                  /*IsLoad=*/false, &IndexVT);
1623fe6060f1SDimitry Andric 
1624fe6060f1SDimitry Andric       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
1625fe6060f1SDimitry Andric              "Element count mismatch");
1626fe6060f1SDimitry Andric 
1627fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1628fe6060f1SDimitry Andric       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
1629fe6060f1SDimitry Andric       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
163004eeddc0SDimitry Andric       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
163104eeddc0SDimitry Andric         report_fatal_error("The V extension does not support EEW=64 for index "
163204eeddc0SDimitry Andric                            "values when XLEN=32");
163304eeddc0SDimitry Andric       }
1634fe6060f1SDimitry Andric       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
163504eeddc0SDimitry Andric           IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
163604eeddc0SDimitry Andric           static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
1637fe6060f1SDimitry Andric       MachineSDNode *Store =
1638fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1639fe6060f1SDimitry Andric 
1640fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1641fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1642fe6060f1SDimitry Andric 
1643fe6060f1SDimitry Andric       ReplaceNode(Node, Store);
1644fe6060f1SDimitry Andric       return;
1645fe6060f1SDimitry Andric     }
1646349cc55cSDimitry Andric     case Intrinsic::riscv_vsm:
1647fe6060f1SDimitry Andric     case Intrinsic::riscv_vse:
1648fe6060f1SDimitry Andric     case Intrinsic::riscv_vse_mask:
1649fe6060f1SDimitry Andric     case Intrinsic::riscv_vsse:
1650fe6060f1SDimitry Andric     case Intrinsic::riscv_vsse_mask: {
1651fe6060f1SDimitry Andric       bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
1652fe6060f1SDimitry Andric                       IntNo == Intrinsic::riscv_vsse_mask;
1653fe6060f1SDimitry Andric       bool IsStrided =
1654fe6060f1SDimitry Andric           IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;
1655fe6060f1SDimitry Andric 
1656fe6060f1SDimitry Andric       MVT VT = Node->getOperand(2)->getSimpleValueType(0);
1657fe6060f1SDimitry Andric       unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1658fe6060f1SDimitry Andric 
1659fe6060f1SDimitry Andric       unsigned CurOp = 2;
1660fe6060f1SDimitry Andric       SmallVector<SDValue, 8> Operands;
1661fe6060f1SDimitry Andric       Operands.push_back(Node->getOperand(CurOp++)); // Store value.
1662fe6060f1SDimitry Andric 
1663fe6060f1SDimitry Andric       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
1664fe6060f1SDimitry Andric                                  Operands);
1665fe6060f1SDimitry Andric 
1666fe6060f1SDimitry Andric       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1667fe6060f1SDimitry Andric       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
1668fe6060f1SDimitry Andric           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
1669fe6060f1SDimitry Andric       MachineSDNode *Store =
1670fe6060f1SDimitry Andric           CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
1671fe6060f1SDimitry Andric       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
1672fe6060f1SDimitry Andric         CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});
1673fe6060f1SDimitry Andric 
1674fe6060f1SDimitry Andric       ReplaceNode(Node, Store);
1675e8d8bef9SDimitry Andric       return;
1676e8d8bef9SDimitry Andric     }
1677e8d8bef9SDimitry Andric     }
1678e8d8bef9SDimitry Andric     break;
1679e8d8bef9SDimitry Andric   }
1680fe6060f1SDimitry Andric   case ISD::BITCAST: {
1681fe6060f1SDimitry Andric     MVT SrcVT = Node->getOperand(0).getSimpleValueType();
1682fe6060f1SDimitry Andric     // Just drop bitcasts between vectors if both are fixed or both are
1683fe6060f1SDimitry Andric     // scalable.
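    // e.g. a bitcast from nxv2i32 to nxv4i16, or between two same-sized
    // fixed-length vectors, does not change the register contents, so the
    // source operand can be used directly.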
1684fe6060f1SDimitry Andric     if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
1685fe6060f1SDimitry Andric         (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
1686fe6060f1SDimitry Andric       ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
1687fe6060f1SDimitry Andric       CurDAG->RemoveDeadNode(Node);
1688e8d8bef9SDimitry Andric       return;
1689e8d8bef9SDimitry Andric     }
1690fe6060f1SDimitry Andric     break;
1691fe6060f1SDimitry Andric   }
1692fe6060f1SDimitry Andric   case ISD::INSERT_SUBVECTOR: {
1693fe6060f1SDimitry Andric     SDValue V = Node->getOperand(0);
1694fe6060f1SDimitry Andric     SDValue SubV = Node->getOperand(1);
1695fe6060f1SDimitry Andric     SDLoc DL(SubV);
1696fe6060f1SDimitry Andric     auto Idx = Node->getConstantOperandVal(2);
1697fe6060f1SDimitry Andric     MVT SubVecVT = SubV.getSimpleValueType();
1698fe6060f1SDimitry Andric 
1699fe6060f1SDimitry Andric     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1700fe6060f1SDimitry Andric     MVT SubVecContainerVT = SubVecVT;
1701fe6060f1SDimitry Andric     // Establish the correct scalable-vector types for any fixed-length type.
1702fe6060f1SDimitry Andric     if (SubVecVT.isFixedLengthVector())
1703fe6060f1SDimitry Andric       SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
1704fe6060f1SDimitry Andric     if (VT.isFixedLengthVector())
1705fe6060f1SDimitry Andric       VT = TLI.getContainerForFixedLengthVector(VT);
1706fe6060f1SDimitry Andric 
1707fe6060f1SDimitry Andric     const auto *TRI = Subtarget->getRegisterInfo();
1708fe6060f1SDimitry Andric     unsigned SubRegIdx;
1709fe6060f1SDimitry Andric     std::tie(SubRegIdx, Idx) =
1710fe6060f1SDimitry Andric         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1711fe6060f1SDimitry Andric             VT, SubVecContainerVT, Idx, TRI);
1712fe6060f1SDimitry Andric 
1713fe6060f1SDimitry Andric     // If the Idx hasn't been completely eliminated then this is a subvector
1714fe6060f1SDimitry Andric     // insert which doesn't naturally align to a vector register. These must
1715fe6060f1SDimitry Andric     // be handled using instructions to manipulate the vector registers.
1716fe6060f1SDimitry Andric     if (Idx != 0)
1717fe6060f1SDimitry Andric       break;
1718fe6060f1SDimitry Andric 
1719fe6060f1SDimitry Andric     RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
1720fe6060f1SDimitry Andric     bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
1721fe6060f1SDimitry Andric                            SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
1722fe6060f1SDimitry Andric                            SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
1723fe6060f1SDimitry Andric     (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
1724fe6060f1SDimitry Andric     assert((!IsSubVecPartReg || V.isUndef()) &&
1725fe6060f1SDimitry Andric            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
1726fe6060f1SDimitry Andric            "the subvector is smaller than a full-sized register");
1727fe6060f1SDimitry Andric 
1728fe6060f1SDimitry Andric     // If we haven't set a SubRegIdx, then we must be going between
1729fe6060f1SDimitry Andric     // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1730fe6060f1SDimitry Andric     if (SubRegIdx == RISCV::NoSubRegister) {
1731fe6060f1SDimitry Andric       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
1732fe6060f1SDimitry Andric       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1733fe6060f1SDimitry Andric                  InRegClassID &&
1734fe6060f1SDimitry Andric              "Unexpected subvector insertion");
1735fe6060f1SDimitry Andric       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1736fe6060f1SDimitry Andric       SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1737fe6060f1SDimitry Andric                                                DL, VT, SubV, RC);
1738fe6060f1SDimitry Andric       ReplaceNode(Node, NewNode);
1739fe6060f1SDimitry Andric       return;
1740fe6060f1SDimitry Andric     }
1741fe6060f1SDimitry Andric 
1742fe6060f1SDimitry Andric     SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
1743fe6060f1SDimitry Andric     ReplaceNode(Node, Insert.getNode());
1744fe6060f1SDimitry Andric     return;
1745fe6060f1SDimitry Andric   }
1746fe6060f1SDimitry Andric   case ISD::EXTRACT_SUBVECTOR: {
1747fe6060f1SDimitry Andric     SDValue V = Node->getOperand(0);
1748fe6060f1SDimitry Andric     auto Idx = Node->getConstantOperandVal(1);
1749fe6060f1SDimitry Andric     MVT InVT = V.getSimpleValueType();
1750fe6060f1SDimitry Andric     SDLoc DL(V);
1751fe6060f1SDimitry Andric 
1752fe6060f1SDimitry Andric     const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
1753fe6060f1SDimitry Andric     MVT SubVecContainerVT = VT;
1754fe6060f1SDimitry Andric     // Establish the correct scalable-vector types for any fixed-length type.
1755fe6060f1SDimitry Andric     if (VT.isFixedLengthVector())
1756fe6060f1SDimitry Andric       SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
1757fe6060f1SDimitry Andric     if (InVT.isFixedLengthVector())
1758fe6060f1SDimitry Andric       InVT = TLI.getContainerForFixedLengthVector(InVT);
1759fe6060f1SDimitry Andric 
1760fe6060f1SDimitry Andric     const auto *TRI = Subtarget->getRegisterInfo();
1761fe6060f1SDimitry Andric     unsigned SubRegIdx;
1762fe6060f1SDimitry Andric     std::tie(SubRegIdx, Idx) =
1763fe6060f1SDimitry Andric         RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1764fe6060f1SDimitry Andric             InVT, SubVecContainerVT, Idx, TRI);
1765fe6060f1SDimitry Andric 
1766fe6060f1SDimitry Andric     // If the Idx hasn't been completely eliminated then this is a subvector
1767fe6060f1SDimitry Andric     // extract which doesn't naturally align to a vector register. These must
1768fe6060f1SDimitry Andric     // be handled using instructions to manipulate the vector registers.
1769fe6060f1SDimitry Andric     if (Idx != 0)
1770fe6060f1SDimitry Andric       break;
1771fe6060f1SDimitry Andric 
1772fe6060f1SDimitry Andric     // If we haven't set a SubRegIdx, then we must be going between
1773fe6060f1SDimitry Andric     // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
1774fe6060f1SDimitry Andric     if (SubRegIdx == RISCV::NoSubRegister) {
1775fe6060f1SDimitry Andric       unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
1776fe6060f1SDimitry Andric       assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
1777fe6060f1SDimitry Andric                  InRegClassID &&
1778fe6060f1SDimitry Andric              "Unexpected subvector extraction");
1779fe6060f1SDimitry Andric       SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
1780fe6060f1SDimitry Andric       SDNode *NewNode =
1781fe6060f1SDimitry Andric           CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
1782fe6060f1SDimitry Andric       ReplaceNode(Node, NewNode);
1783fe6060f1SDimitry Andric       return;
1784fe6060f1SDimitry Andric     }
1785fe6060f1SDimitry Andric 
1786fe6060f1SDimitry Andric     SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
1787fe6060f1SDimitry Andric     ReplaceNode(Node, Extract.getNode());
1788fe6060f1SDimitry Andric     return;
1789fe6060f1SDimitry Andric   }
179004eeddc0SDimitry Andric   case RISCVISD::VMV_S_X_VL:
179104eeddc0SDimitry Andric   case RISCVISD::VFMV_S_F_VL:
1792fe6060f1SDimitry Andric   case RISCVISD::VMV_V_X_VL:
1793fe6060f1SDimitry Andric   case RISCVISD::VFMV_V_F_VL: {
1794*bdd1243dSDimitry Andric     // Only do this if the subtarget has an optimized zero-stride vector load.
1795*bdd1243dSDimitry Andric     if (!Subtarget->hasOptimizedZeroStrideLoad())
1796*bdd1243dSDimitry Andric       break;
1797*bdd1243dSDimitry Andric 
1798fe6060f1SDimitry Andric     // Try to match splat of a scalar load to a strided load with stride of x0.
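    // As an illustrative example (assuming an i32 element type), the splat of
    // a loaded scalar can be selected as a zero-strided load such as
    //   vlse32.v vd, (a0), zero
    // where the x0 stride makes every element read the same address.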
179904eeddc0SDimitry Andric     bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
180004eeddc0SDimitry Andric                         Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
1801*bdd1243dSDimitry Andric     if (!Node->getOperand(0).isUndef())
180204eeddc0SDimitry Andric       break;
1803*bdd1243dSDimitry Andric     SDValue Src = Node->getOperand(1);
1804fe6060f1SDimitry Andric     auto *Ld = dyn_cast<LoadSDNode>(Src);
1805fe6060f1SDimitry Andric     if (!Ld)
1806fe6060f1SDimitry Andric       break;
1807fe6060f1SDimitry Andric     EVT MemVT = Ld->getMemoryVT();
1808fe6060f1SDimitry Andric     // The memory VT should be the same size as the element type.
1809fe6060f1SDimitry Andric     if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
1810fe6060f1SDimitry Andric       break;
1811fe6060f1SDimitry Andric     if (!IsProfitableToFold(Src, Node, Node) ||
1812fe6060f1SDimitry Andric         !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
1813fe6060f1SDimitry Andric       break;
1814fe6060f1SDimitry Andric 
1815fe6060f1SDimitry Andric     SDValue VL;
1816*bdd1243dSDimitry Andric     if (IsScalarMove) {
181704eeddc0SDimitry Andric       // We could deal with more VL if we update the VSETVLI insert pass to
181804eeddc0SDimitry Andric       // avoid introducing more VSETVLI.
181904eeddc0SDimitry Andric       if (!isOneConstant(Node->getOperand(2)))
182004eeddc0SDimitry Andric         break;
182104eeddc0SDimitry Andric       selectVLOp(Node->getOperand(2), VL);
182204eeddc0SDimitry Andric     } else
182381ad6265SDimitry Andric       selectVLOp(Node->getOperand(2), VL);
1824fe6060f1SDimitry Andric 
1825fe6060f1SDimitry Andric     unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
1826fe6060f1SDimitry Andric     SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
1827fe6060f1SDimitry Andric 
1828fe6060f1SDimitry Andric     SDValue Operands[] = {Ld->getBasePtr(),
1829fe6060f1SDimitry Andric                           CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
1830fe6060f1SDimitry Andric                           Ld->getChain()};
1831fe6060f1SDimitry Andric 
1832fe6060f1SDimitry Andric     RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
1833fe6060f1SDimitry Andric     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
183404eeddc0SDimitry Andric         /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
183504eeddc0SDimitry Andric         Log2SEW, static_cast<unsigned>(LMUL));
1836fe6060f1SDimitry Andric     MachineSDNode *Load =
1837*bdd1243dSDimitry Andric         CurDAG->getMachineNode(P->Pseudo, DL, {VT, MVT::Other}, Operands);
1838*bdd1243dSDimitry Andric     // Update the chain.
1839*bdd1243dSDimitry Andric     ReplaceUses(Src.getValue(1), SDValue(Load, 1));
1840*bdd1243dSDimitry Andric     // Record the mem-refs
184181ad6265SDimitry Andric     CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});
1842*bdd1243dSDimitry Andric     // Replace the splat with the vlse.
1843fe6060f1SDimitry Andric     ReplaceNode(Node, Load);
1844e8d8bef9SDimitry Andric     return;
1845e8d8bef9SDimitry Andric   }
1846e8d8bef9SDimitry Andric   }
18470b57cec5SDimitry Andric 
18480b57cec5SDimitry Andric   // Select the default instruction.
18490b57cec5SDimitry Andric   SelectCode(Node);
18500b57cec5SDimitry Andric }
18510b57cec5SDimitry Andric 
18520b57cec5SDimitry Andric bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
18530b57cec5SDimitry Andric     const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
18540b57cec5SDimitry Andric   switch (ConstraintID) {
18550b57cec5SDimitry Andric   case InlineAsm::Constraint_m:
18560b57cec5SDimitry Andric     // We just support simple memory operands that have a single address
18570b57cec5SDimitry Andric     // operand and need no special handling.
18580b57cec5SDimitry Andric     OutOps.push_back(Op);
18590b57cec5SDimitry Andric     return false;
18600b57cec5SDimitry Andric   case InlineAsm::Constraint_A:
18610b57cec5SDimitry Andric     OutOps.push_back(Op);
18620b57cec5SDimitry Andric     return false;
18630b57cec5SDimitry Andric   default:
18640b57cec5SDimitry Andric     break;
18650b57cec5SDimitry Andric   }
18660b57cec5SDimitry Andric 
18670b57cec5SDimitry Andric   return true;
18680b57cec5SDimitry Andric }
18690b57cec5SDimitry Andric 
187081ad6265SDimitry Andric bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
187181ad6265SDimitry Andric                                              SDValue &Offset) {
1872fe6060f1SDimitry Andric   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
18730b57cec5SDimitry Andric     Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
187481ad6265SDimitry Andric     Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
18750b57cec5SDimitry Andric     return true;
18760b57cec5SDimitry Andric   }
187781ad6265SDimitry Andric 
187881ad6265SDimitry Andric   return false;
187981ad6265SDimitry Andric }
188081ad6265SDimitry Andric 
188181ad6265SDimitry Andric // Select a frame index and an optional immediate offset from an ADD or OR.
188281ad6265SDimitry Andric bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
188381ad6265SDimitry Andric                                               SDValue &Offset) {
188481ad6265SDimitry Andric   if (SelectAddrFrameIndex(Addr, Base, Offset))
188581ad6265SDimitry Andric     return true;
188681ad6265SDimitry Andric 
188781ad6265SDimitry Andric   if (!CurDAG->isBaseWithConstantOffset(Addr))
188881ad6265SDimitry Andric     return false;
188981ad6265SDimitry Andric 
189081ad6265SDimitry Andric   if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
189181ad6265SDimitry Andric     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
189281ad6265SDimitry Andric     if (isInt<12>(CVal)) {
189381ad6265SDimitry Andric       Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
189481ad6265SDimitry Andric                                          Subtarget->getXLenVT());
189581ad6265SDimitry Andric       Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
189681ad6265SDimitry Andric                                          Subtarget->getXLenVT());
189781ad6265SDimitry Andric       return true;
189881ad6265SDimitry Andric     }
189981ad6265SDimitry Andric   }
190081ad6265SDimitry Andric 
19010b57cec5SDimitry Andric   return false;
19020b57cec5SDimitry Andric }
19030b57cec5SDimitry Andric 
1904753f127fSDimitry Andric // Fold constant addresses.
1905753f127fSDimitry Andric static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
1906753f127fSDimitry Andric                                const MVT VT, const RISCVSubtarget *Subtarget,
1907753f127fSDimitry Andric                                SDValue Addr, SDValue &Base, SDValue &Offset) {
1908753f127fSDimitry Andric   if (!isa<ConstantSDNode>(Addr))
1909753f127fSDimitry Andric     return false;
1910753f127fSDimitry Andric 
1911753f127fSDimitry Andric   int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();
1912753f127fSDimitry Andric 
1913753f127fSDimitry Andric   // If the constant is a simm12, we can fold the whole constant and use X0 as
1914753f127fSDimitry Andric   // the base. If the constant can be materialized with LUI+simm12, use LUI as
1915753f127fSDimitry Andric   // the base. We can't use generateInstSeq because it favors LUI+ADDIW.
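  // For example (hypothetical constant), CVal = 0x12345678 splits into
  // Hi20 = 0x12345 and Lo12 = 0x678, giving LUI base, 0x12345 with a
  // load/store offset of 0x678.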
1916753f127fSDimitry Andric   int64_t Lo12 = SignExtend64<12>(CVal);
1917753f127fSDimitry Andric   int64_t Hi = (uint64_t)CVal - (uint64_t)Lo12;
1918753f127fSDimitry Andric   if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
1919753f127fSDimitry Andric     if (Hi) {
1920753f127fSDimitry Andric       int64_t Hi20 = (Hi >> 12) & 0xfffff;
1921753f127fSDimitry Andric       Base = SDValue(
1922753f127fSDimitry Andric           CurDAG->getMachineNode(RISCV::LUI, DL, VT,
1923753f127fSDimitry Andric                                  CurDAG->getTargetConstant(Hi20, DL, VT)),
1924753f127fSDimitry Andric           0);
1925753f127fSDimitry Andric     } else {
1926753f127fSDimitry Andric       Base = CurDAG->getRegister(RISCV::X0, VT);
1927753f127fSDimitry Andric     }
1928753f127fSDimitry Andric     Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
1929753f127fSDimitry Andric     return true;
1930753f127fSDimitry Andric   }
1931753f127fSDimitry Andric 
1932753f127fSDimitry Andric   // Ask how constant materialization would handle this constant.
1933753f127fSDimitry Andric   RISCVMatInt::InstSeq Seq =
1934753f127fSDimitry Andric       RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());
1935753f127fSDimitry Andric 
1936753f127fSDimitry Andric   // If the last instruction would be an ADDI, we can fold its immediate and
1937753f127fSDimitry Andric   // emit the rest of the sequence as the base.
1938*bdd1243dSDimitry Andric   if (Seq.back().getOpcode() != RISCV::ADDI)
1939753f127fSDimitry Andric     return false;
1940*bdd1243dSDimitry Andric   Lo12 = Seq.back().getImm();
1941753f127fSDimitry Andric 
1942753f127fSDimitry Andric   // Drop the last instruction.
1943753f127fSDimitry Andric   Seq.pop_back();
1944753f127fSDimitry Andric   assert(!Seq.empty() && "Expected more instructions in sequence");
1945753f127fSDimitry Andric 
1946753f127fSDimitry Andric   Base = SDValue(selectImmSeq(CurDAG, DL, VT, Seq), 0);
1947753f127fSDimitry Andric   Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
1948753f127fSDimitry Andric   return true;
1949753f127fSDimitry Andric }
1950753f127fSDimitry Andric 
1951753f127fSDimitry Andric // Is this ADD instruction only used as the base pointer of scalar loads and
1952753f127fSDimitry Andric // stores?
1953753f127fSDimitry Andric static bool isWorthFoldingAdd(SDValue Add) {
1954*bdd1243dSDimitry Andric   for (auto *Use : Add->uses()) {
1955753f127fSDimitry Andric     if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
1956753f127fSDimitry Andric         Use->getOpcode() != ISD::ATOMIC_LOAD &&
1957753f127fSDimitry Andric         Use->getOpcode() != ISD::ATOMIC_STORE)
1958753f127fSDimitry Andric       return false;
1959753f127fSDimitry Andric     EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
1960753f127fSDimitry Andric     if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
1961753f127fSDimitry Andric         VT != MVT::f64)
1962753f127fSDimitry Andric       return false;
1963753f127fSDimitry Andric     // Don't allow stores of the value. It must be used as the address.
1964753f127fSDimitry Andric     if (Use->getOpcode() == ISD::STORE &&
1965753f127fSDimitry Andric         cast<StoreSDNode>(Use)->getValue() == Add)
1966753f127fSDimitry Andric       return false;
1967753f127fSDimitry Andric     if (Use->getOpcode() == ISD::ATOMIC_STORE &&
1968753f127fSDimitry Andric         cast<AtomicSDNode>(Use)->getVal() == Add)
1969753f127fSDimitry Andric       return false;
1970753f127fSDimitry Andric   }
1971753f127fSDimitry Andric 
1972fe6060f1SDimitry Andric   return true;
1973e8d8bef9SDimitry Andric }
1974e8d8bef9SDimitry Andric 
197581ad6265SDimitry Andric bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
197681ad6265SDimitry Andric                                          SDValue &Offset) {
197781ad6265SDimitry Andric   if (SelectAddrFrameIndex(Addr, Base, Offset))
197881ad6265SDimitry Andric     return true;
197981ad6265SDimitry Andric 
198081ad6265SDimitry Andric   SDLoc DL(Addr);
198181ad6265SDimitry Andric   MVT VT = Addr.getSimpleValueType();
198281ad6265SDimitry Andric 
198381ad6265SDimitry Andric   if (Addr.getOpcode() == RISCVISD::ADD_LO) {
198481ad6265SDimitry Andric     Base = Addr.getOperand(0);
198581ad6265SDimitry Andric     Offset = Addr.getOperand(1);
198681ad6265SDimitry Andric     return true;
198781ad6265SDimitry Andric   }
198881ad6265SDimitry Andric 
198981ad6265SDimitry Andric   if (CurDAG->isBaseWithConstantOffset(Addr)) {
199081ad6265SDimitry Andric     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
199181ad6265SDimitry Andric     if (isInt<12>(CVal)) {
199281ad6265SDimitry Andric       Base = Addr.getOperand(0);
199381ad6265SDimitry Andric       if (Base.getOpcode() == RISCVISD::ADD_LO) {
199481ad6265SDimitry Andric         SDValue LoOperand = Base.getOperand(1);
199581ad6265SDimitry Andric         if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
199681ad6265SDimitry Andric           // If the Lo in (ADD_LO hi, lo) is a global variable's address
199781ad6265SDimitry Andric           // (its low part, really), then we can rely on the alignment of that
199881ad6265SDimitry Andric           // variable to provide a margin of safety before the low part can overflow
199981ad6265SDimitry Andric           // the 12 bits of the load/store offset. Check if CVal falls within
200081ad6265SDimitry Andric           // that margin; if so (low part + CVal) can't overflow.
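          // For instance, with an 8-byte-aligned global and a GA offset of 0,
          // any CVal in [0, 7] stays within that margin and can be folded.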
200181ad6265SDimitry Andric           const DataLayout &DL = CurDAG->getDataLayout();
200281ad6265SDimitry Andric           Align Alignment = commonAlignment(
200381ad6265SDimitry Andric               GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
200481ad6265SDimitry Andric           if (CVal == 0 || Alignment > CVal) {
200581ad6265SDimitry Andric             int64_t CombinedOffset = CVal + GA->getOffset();
200681ad6265SDimitry Andric             Base = Base.getOperand(0);
200781ad6265SDimitry Andric             Offset = CurDAG->getTargetGlobalAddress(
200881ad6265SDimitry Andric                 GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
200981ad6265SDimitry Andric                 CombinedOffset, GA->getTargetFlags());
201081ad6265SDimitry Andric             return true;
201181ad6265SDimitry Andric           }
201281ad6265SDimitry Andric         }
201381ad6265SDimitry Andric       }
201481ad6265SDimitry Andric 
201581ad6265SDimitry Andric       if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
201681ad6265SDimitry Andric         Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
201781ad6265SDimitry Andric       Offset = CurDAG->getTargetConstant(CVal, DL, VT);
201881ad6265SDimitry Andric       return true;
201981ad6265SDimitry Andric     }
202081ad6265SDimitry Andric   }
202181ad6265SDimitry Andric 
202281ad6265SDimitry Andric   // Handle ADD with large immediates.
202381ad6265SDimitry Andric   if (Addr.getOpcode() == ISD::ADD && isa<ConstantSDNode>(Addr.getOperand(1))) {
202481ad6265SDimitry Andric     int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
202581ad6265SDimitry Andric     assert(!isInt<12>(CVal) && "simm12 not already handled?");
202681ad6265SDimitry Andric 
2027753f127fSDimitry Andric     // Handle immediates in the range [-4096,-2049] or [2048, 4094]. We can use
2028753f127fSDimitry Andric     // an ADDI for part of the offset and fold the rest into the load/store.
2029753f127fSDimitry Andric     // This mirrors the AddiPair PatFrag in RISCVInstrInfo.td.
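    // For example, a hypothetical offset of 4094 becomes
    //   ADDI tmp, base, 2047
    // with the remaining 2047 folded into the load/store immediate.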
203081ad6265SDimitry Andric     if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
203181ad6265SDimitry Andric       int64_t Adj = CVal < 0 ? -2048 : 2047;
203281ad6265SDimitry Andric       Base = SDValue(
203381ad6265SDimitry Andric           CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
203481ad6265SDimitry Andric                                  CurDAG->getTargetConstant(Adj, DL, VT)),
203581ad6265SDimitry Andric           0);
203681ad6265SDimitry Andric       Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
203781ad6265SDimitry Andric       return true;
203881ad6265SDimitry Andric     }
2039753f127fSDimitry Andric 
2040753f127fSDimitry Andric     // For larger immediates, we might be able to save one instruction from
2041753f127fSDimitry Andric     // constant materialization by folding the Lo12 bits of the immediate into
2042753f127fSDimitry Andric     // the address. We should only do this if the ADD is only used by loads and
2043753f127fSDimitry Andric     // stores that can fold the lo12 bits. Otherwise, the ADD will get iseled
2044753f127fSDimitry Andric     // separately with the full materialized immediate creating extra
2045753f127fSDimitry Andric     // instructions.
2046753f127fSDimitry Andric     if (isWorthFoldingAdd(Addr) &&
2047753f127fSDimitry Andric         selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr.getOperand(1), Base,
2048753f127fSDimitry Andric                            Offset)) {
2049753f127fSDimitry Andric       // Insert an ADD instruction with the materialized Hi52 bits.
2050753f127fSDimitry Andric       Base = SDValue(
2051753f127fSDimitry Andric           CurDAG->getMachineNode(RISCV::ADD, DL, VT, Addr.getOperand(0), Base),
2052753f127fSDimitry Andric           0);
2053753f127fSDimitry Andric       return true;
205481ad6265SDimitry Andric     }
2055753f127fSDimitry Andric   }
2056753f127fSDimitry Andric 
2057753f127fSDimitry Andric   if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset))
2058753f127fSDimitry Andric     return true;
205981ad6265SDimitry Andric 
206081ad6265SDimitry Andric   Base = Addr;
206181ad6265SDimitry Andric   Offset = CurDAG->getTargetConstant(0, DL, VT);
206281ad6265SDimitry Andric   return true;
206381ad6265SDimitry Andric }
206481ad6265SDimitry Andric 
2065fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
2066fe6060f1SDimitry Andric                                         SDValue &ShAmt) {
2067*bdd1243dSDimitry Andric   ShAmt = N;
2068*bdd1243dSDimitry Andric 
2069fe6060f1SDimitry Andric   // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
2070fe6060f1SDimitry Andric   // amount. If there is an AND on the shift amount, we can bypass it if it
2071fe6060f1SDimitry Andric   // doesn't affect any of those bits.
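  // For example, on RV64 a 64-bit shift only reads bits [5:0], so a pattern
  // like (srl X, (and Y, 63)) can be selected as if it were (srl X, Y).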
2072*bdd1243dSDimitry Andric   if (ShAmt.getOpcode() == ISD::AND && isa<ConstantSDNode>(ShAmt.getOperand(1))) {
2073*bdd1243dSDimitry Andric     const APInt &AndMask = ShAmt.getConstantOperandAPInt(1);
2074979e22ffSDimitry Andric 
2075fe6060f1SDimitry Andric     // Since the max shift amount is a power of 2 we can subtract 1 to make a
2076fe6060f1SDimitry Andric     // mask that covers the bits needed to represent all shift amounts.
2077fe6060f1SDimitry Andric     assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
2078fe6060f1SDimitry Andric     APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
2079e8d8bef9SDimitry Andric 
2080fe6060f1SDimitry Andric     if (ShMask.isSubsetOf(AndMask)) {
2081*bdd1243dSDimitry Andric       ShAmt = ShAmt.getOperand(0);
2082*bdd1243dSDimitry Andric     } else {
2083fe6060f1SDimitry Andric       // SimplifyDemandedBits may have optimized the mask so try restoring any
2084fe6060f1SDimitry Andric       // bits that are known zero.
2085*bdd1243dSDimitry Andric       KnownBits Known = CurDAG->computeKnownBits(ShAmt.getOperand(0));
2086*bdd1243dSDimitry Andric       if (!ShMask.isSubsetOf(AndMask | Known.Zero))
2087*bdd1243dSDimitry Andric         return true;
2088*bdd1243dSDimitry Andric       ShAmt = ShAmt.getOperand(0);
2089*bdd1243dSDimitry Andric     }
2090*bdd1243dSDimitry Andric   }
2091*bdd1243dSDimitry Andric 
2092*bdd1243dSDimitry Andric   if (ShAmt.getOpcode() == ISD::ADD &&
2093*bdd1243dSDimitry Andric       isa<ConstantSDNode>(ShAmt.getOperand(1))) {
2094*bdd1243dSDimitry Andric     uint64_t Imm = ShAmt.getConstantOperandVal(1);
2095*bdd1243dSDimitry Andric     // If we are shifting by X+N where N == 0 mod Size, then just shift by X
2096*bdd1243dSDimitry Andric     // to avoid the ADD.
2097*bdd1243dSDimitry Andric     if (Imm != 0 && Imm % ShiftWidth == 0) {
2098*bdd1243dSDimitry Andric       ShAmt = ShAmt.getOperand(0);
2099fe6060f1SDimitry Andric       return true;
2100fe6060f1SDimitry Andric     }
2101*bdd1243dSDimitry Andric   } else if (ShAmt.getOpcode() == ISD::SUB &&
2102*bdd1243dSDimitry Andric              isa<ConstantSDNode>(ShAmt.getOperand(0))) {
2103*bdd1243dSDimitry Andric     uint64_t Imm = ShAmt.getConstantOperandVal(0);
210481ad6265SDimitry Andric     // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
210581ad6265SDimitry Andric     // generate a NEG instead of a SUB of a constant.
210681ad6265SDimitry Andric     if (Imm != 0 && Imm % ShiftWidth == 0) {
2107*bdd1243dSDimitry Andric       SDLoc DL(ShAmt);
2108*bdd1243dSDimitry Andric       EVT VT = ShAmt.getValueType();
210981ad6265SDimitry Andric       SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
211081ad6265SDimitry Andric       unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
211181ad6265SDimitry Andric       MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
2112*bdd1243dSDimitry Andric                                                   ShAmt.getOperand(1));
211381ad6265SDimitry Andric       ShAmt = SDValue(Neg, 0);
211481ad6265SDimitry Andric       return true;
211581ad6265SDimitry Andric     }
2116*bdd1243dSDimitry Andric     // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
2117*bdd1243dSDimitry Andric     // to generate a NOT instead of a SUB of a constant.
2118*bdd1243dSDimitry Andric     if (Imm % ShiftWidth == ShiftWidth - 1) {
2119*bdd1243dSDimitry Andric       SDLoc DL(ShAmt);
2120*bdd1243dSDimitry Andric       EVT VT = ShAmt.getValueType();
2121*bdd1243dSDimitry Andric       MachineSDNode *Not =
2122*bdd1243dSDimitry Andric           CurDAG->getMachineNode(RISCV::XORI, DL, VT, ShAmt.getOperand(1),
2123*bdd1243dSDimitry Andric                                  CurDAG->getTargetConstant(-1, DL, VT));
2124*bdd1243dSDimitry Andric       ShAmt = SDValue(Not, 0);
2125*bdd1243dSDimitry Andric       return true;
2126*bdd1243dSDimitry Andric     }
2127fe6060f1SDimitry Andric   }
2128fe6060f1SDimitry Andric 
2129fe6060f1SDimitry Andric   return true;
2130fe6060f1SDimitry Andric }
2131fe6060f1SDimitry Andric 
2132fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
2133fe6060f1SDimitry Andric   if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
2134fe6060f1SDimitry Andric       cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
2135fe6060f1SDimitry Andric     Val = N.getOperand(0);
2136fe6060f1SDimitry Andric     return true;
2137fe6060f1SDimitry Andric   }
2138fe6060f1SDimitry Andric   MVT VT = N.getSimpleValueType();
2139fe6060f1SDimitry Andric   if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
2140fe6060f1SDimitry Andric     Val = N;
2141fe6060f1SDimitry Andric     return true;
2142fe6060f1SDimitry Andric   }
2143fe6060f1SDimitry Andric 
2144fe6060f1SDimitry Andric   return false;
2145fe6060f1SDimitry Andric }
2146fe6060f1SDimitry Andric 
2147*bdd1243dSDimitry Andric bool RISCVDAGToDAGISel::selectZExtBits(SDValue N, unsigned Bits, SDValue &Val) {
2148fe6060f1SDimitry Andric   if (N.getOpcode() == ISD::AND) {
2149fe6060f1SDimitry Andric     auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2150*bdd1243dSDimitry Andric     if (C && C->getZExtValue() == maskTrailingOnes<uint64_t>(Bits)) {
2151fe6060f1SDimitry Andric       Val = N.getOperand(0);
2152fe6060f1SDimitry Andric       return true;
2153fe6060f1SDimitry Andric     }
2154fe6060f1SDimitry Andric   }
2155fe6060f1SDimitry Andric   MVT VT = N.getSimpleValueType();
2156*bdd1243dSDimitry Andric   APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), Bits);
2157fe6060f1SDimitry Andric   if (CurDAG->MaskedValueIsZero(N, Mask)) {
2158fe6060f1SDimitry Andric     Val = N;
2159fe6060f1SDimitry Andric     return true;
2160fe6060f1SDimitry Andric   }
2161fe6060f1SDimitry Andric 
2162fe6060f1SDimitry Andric   return false;
2163fe6060f1SDimitry Andric }
2164fe6060f1SDimitry Andric 
2165753f127fSDimitry Andric /// Look for various patterns that can be done with a SHL that can be folded
2166753f127fSDimitry Andric /// into a SHXADD. \p ShAmt contains 1, 2, or 3 and is set based on which
2167753f127fSDimitry Andric /// SHXADD we are trying to match.
2168753f127fSDimitry Andric bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
2169753f127fSDimitry Andric                                        SDValue &Val) {
2170753f127fSDimitry Andric   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
2171753f127fSDimitry Andric     SDValue N0 = N.getOperand(0);
2172753f127fSDimitry Andric 
2173753f127fSDimitry Andric     bool LeftShift = N0.getOpcode() == ISD::SHL;
2174753f127fSDimitry Andric     if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
2175753f127fSDimitry Andric         isa<ConstantSDNode>(N0.getOperand(1))) {
2176753f127fSDimitry Andric       uint64_t Mask = N.getConstantOperandVal(1);
2177753f127fSDimitry Andric       unsigned C2 = N0.getConstantOperandVal(1);
2178753f127fSDimitry Andric 
2179753f127fSDimitry Andric       unsigned XLen = Subtarget->getXLen();
2180753f127fSDimitry Andric       if (LeftShift)
2181753f127fSDimitry Andric         Mask &= maskTrailingZeros<uint64_t>(C2);
2182753f127fSDimitry Andric       else
2183753f127fSDimitry Andric         Mask &= maskTrailingOnes<uint64_t>(XLen - C2);
2184753f127fSDimitry Andric 
2185753f127fSDimitry Andric       // Look for (and (shl y, c2), c1) where c1 is a shifted mask with no
2186753f127fSDimitry Andric       // leading zeros and c3 trailing zeros. We can use an SRLI by c3-c2
2187753f127fSDimitry Andric       // followed by a SHXADD with c3 for the X amount.
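      // A hypothetical instance for SH3ADD (ShAmt == 3): matching
      // (and (shl y, 1), 0xFFFFFFFFFFFFFFF8) yields Val = (SRLI y, 2),
      // since c3 - c2 = 3 - 1 = 2.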
2188753f127fSDimitry Andric       if (isShiftedMask_64(Mask)) {
2189*bdd1243dSDimitry Andric         unsigned Leading = XLen - llvm::bit_width(Mask);
2190753f127fSDimitry Andric         unsigned Trailing = countTrailingZeros(Mask);
2191753f127fSDimitry Andric         if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
2192753f127fSDimitry Andric           SDLoc DL(N);
2193753f127fSDimitry Andric           EVT VT = N.getValueType();
2194753f127fSDimitry Andric           Val = SDValue(CurDAG->getMachineNode(
2195753f127fSDimitry Andric                             RISCV::SRLI, DL, VT, N0.getOperand(0),
2196753f127fSDimitry Andric                             CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
2197753f127fSDimitry Andric                         0);
2198753f127fSDimitry Andric           return true;
2199753f127fSDimitry Andric         }
2200753f127fSDimitry Andric         // Look for (and (shr y, c2), c1) where c1 is a shifted mask with c2
2201753f127fSDimitry Andric         // leading zeros and c3 trailing zeros. We can use an SRLI by c2+c3
2202753f127fSDimitry Andric         // followed by a SHXADD using c3 for the X amount.
2203753f127fSDimitry Andric         if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
2204753f127fSDimitry Andric           SDLoc DL(N);
2205753f127fSDimitry Andric           EVT VT = N.getValueType();
2206753f127fSDimitry Andric           Val = SDValue(
2207753f127fSDimitry Andric               CurDAG->getMachineNode(
2208753f127fSDimitry Andric                   RISCV::SRLI, DL, VT, N0.getOperand(0),
2209753f127fSDimitry Andric                   CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
2210753f127fSDimitry Andric               0);
2211753f127fSDimitry Andric           return true;
2212753f127fSDimitry Andric         }
2213753f127fSDimitry Andric       }
2214753f127fSDimitry Andric     }
2215753f127fSDimitry Andric   }
2216753f127fSDimitry Andric 
2217753f127fSDimitry Andric   bool LeftShift = N.getOpcode() == ISD::SHL;
2218753f127fSDimitry Andric   if ((LeftShift || N.getOpcode() == ISD::SRL) &&
2219753f127fSDimitry Andric       isa<ConstantSDNode>(N.getOperand(1))) {
2220753f127fSDimitry Andric     SDValue N0 = N.getOperand(0);
2221753f127fSDimitry Andric     if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
2222753f127fSDimitry Andric         isa<ConstantSDNode>(N0.getOperand(1))) {
2223753f127fSDimitry Andric       uint64_t Mask = N0.getConstantOperandVal(1);
2224753f127fSDimitry Andric       if (isShiftedMask_64(Mask)) {
2225753f127fSDimitry Andric         unsigned C1 = N.getConstantOperandVal(1);
2226753f127fSDimitry Andric         unsigned XLen = Subtarget->getXLen();
2227*bdd1243dSDimitry Andric         unsigned Leading = XLen - llvm::bit_width(Mask);
2228753f127fSDimitry Andric         unsigned Trailing = countTrailingZeros(Mask);
2229753f127fSDimitry Andric         // Look for (shl (and X, Mask), C1) where Mask has 32 leading zeros and
2230753f127fSDimitry Andric         // C3 trailing zeros. If C1+C3==ShAmt we can use SRLIW+SHXADD.
2231753f127fSDimitry Andric         if (LeftShift && Leading == 32 && Trailing > 0 &&
2232753f127fSDimitry Andric             (Trailing + C1) == ShAmt) {
2233753f127fSDimitry Andric           SDLoc DL(N);
2234753f127fSDimitry Andric           EVT VT = N.getValueType();
2235753f127fSDimitry Andric           Val = SDValue(CurDAG->getMachineNode(
2236753f127fSDimitry Andric                             RISCV::SRLIW, DL, VT, N0.getOperand(0),
2237753f127fSDimitry Andric                             CurDAG->getTargetConstant(Trailing, DL, VT)),
2238753f127fSDimitry Andric                         0);
2239753f127fSDimitry Andric           return true;
2240753f127fSDimitry Andric         }
2241753f127fSDimitry Andric         // Look for (srl (and X, Mask), C1) where Mask has 32 leading zeros and
2242753f127fSDimitry Andric         // C3 trailing zeros. If C3-C1==ShAmt we can use SRLIW+SHXADD.
2243753f127fSDimitry Andric         if (!LeftShift && Leading == 32 && Trailing > C1 &&
2244753f127fSDimitry Andric             (Trailing - C1) == ShAmt) {
2245753f127fSDimitry Andric           SDLoc DL(N);
2246753f127fSDimitry Andric           EVT VT = N.getValueType();
2247753f127fSDimitry Andric           Val = SDValue(CurDAG->getMachineNode(
2248753f127fSDimitry Andric                             RISCV::SRLIW, DL, VT, N0.getOperand(0),
2249753f127fSDimitry Andric                             CurDAG->getTargetConstant(Trailing, DL, VT)),
2250753f127fSDimitry Andric                         0);
2251753f127fSDimitry Andric           return true;
2252753f127fSDimitry Andric         }
2253753f127fSDimitry Andric       }
2254753f127fSDimitry Andric     }
2255753f127fSDimitry Andric   }
2256753f127fSDimitry Andric 
2257753f127fSDimitry Andric   return false;
2258753f127fSDimitry Andric }
2259753f127fSDimitry Andric 
2260*bdd1243dSDimitry Andric /// Look for various patterns that can be done with a SHL that can be folded
2261*bdd1243dSDimitry Andric /// into a SHXADD_UW. \p ShAmt contains 1, 2, or 3 and is set based on which
2262*bdd1243dSDimitry Andric /// SHXADD_UW we are trying to match.
2263*bdd1243dSDimitry Andric bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
2264*bdd1243dSDimitry Andric                                           SDValue &Val) {
2265*bdd1243dSDimitry Andric   if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1)) &&
2266*bdd1243dSDimitry Andric       N.hasOneUse()) {
2267*bdd1243dSDimitry Andric     SDValue N0 = N.getOperand(0);
2268*bdd1243dSDimitry Andric     if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
2269*bdd1243dSDimitry Andric         N0.hasOneUse()) {
2270*bdd1243dSDimitry Andric       uint64_t Mask = N.getConstantOperandVal(1);
2271*bdd1243dSDimitry Andric       unsigned C2 = N0.getConstantOperandVal(1);
2272*bdd1243dSDimitry Andric 
2273*bdd1243dSDimitry Andric       Mask &= maskTrailingZeros<uint64_t>(C2);
2274*bdd1243dSDimitry Andric 
2275*bdd1243dSDimitry Andric       // Look for (and (shl y, c2), c1) where c1 is a shifted mask with
2276*bdd1243dSDimitry Andric       // 32-ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
2277*bdd1243dSDimitry Andric       // c2-ShAmt followed by SHXADD_UW with ShAmt for the X amount.
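      // A hypothetical instance for SH2ADD_UW (ShAmt == 2): matching
      // (and (shl y, 3), 0x3FFFFFFF8) yields Val = (SLLI y, 1), since
      // c2 - ShAmt = 3 - 2 = 1.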
2278*bdd1243dSDimitry Andric       if (isShiftedMask_64(Mask)) {
2279*bdd1243dSDimitry Andric         unsigned Leading = countLeadingZeros(Mask);
2280*bdd1243dSDimitry Andric         unsigned Trailing = countTrailingZeros(Mask);
2281*bdd1243dSDimitry Andric         if (Leading == 32 - ShAmt && Trailing == C2 && Trailing > ShAmt) {
2282*bdd1243dSDimitry Andric           SDLoc DL(N);
2283*bdd1243dSDimitry Andric           EVT VT = N.getValueType();
2284*bdd1243dSDimitry Andric           Val = SDValue(CurDAG->getMachineNode(
2285*bdd1243dSDimitry Andric                             RISCV::SLLI, DL, VT, N0.getOperand(0),
2286*bdd1243dSDimitry Andric                             CurDAG->getTargetConstant(C2 - ShAmt, DL, VT)),
2287*bdd1243dSDimitry Andric                         0);
2288*bdd1243dSDimitry Andric           return true;
2289*bdd1243dSDimitry Andric         }
2290*bdd1243dSDimitry Andric       }
2291*bdd1243dSDimitry Andric     }
2292*bdd1243dSDimitry Andric   }
2293*bdd1243dSDimitry Andric 
2294*bdd1243dSDimitry Andric   return false;
2295*bdd1243dSDimitry Andric }
2296*bdd1243dSDimitry Andric 
2297349cc55cSDimitry Andric // Return true if all users of this SDNode* only consume the lower \p Bits.
2298349cc55cSDimitry Andric // This can be used to form W instructions for add/sub/mul/shl even when the
2299349cc55cSDimitry Andric // root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
2300349cc55cSDimitry Andric // SimplifyDemandedBits has made it so some users see a sext_inreg and some
2301349cc55cSDimitry Andric // don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
2302349cc55cSDimitry Andric // the add/sub/mul/shl to become non-W instructions. By checking the users we
2303349cc55cSDimitry Andric // may be able to use a W instruction and CSE with the other instruction if
2304349cc55cSDimitry Andric // this has happened. We could try to detect that the CSE opportunity exists
2305349cc55cSDimitry Andric // before doing this, but that would be more complicated.
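// For example, if an ADD is only used as the stored value of an SW and as an
// input to an ADDW, both users read just the low 32 bits, so
// hasAllNBitUsers(Add, 32) returns true and the ADD can be selected as ADDW.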
2306*bdd1243dSDimitry Andric bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits,
2307*bdd1243dSDimitry Andric                                         const unsigned Depth) const {
2308349cc55cSDimitry Andric   assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
2309349cc55cSDimitry Andric           Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
2310*bdd1243dSDimitry Andric           Node->getOpcode() == ISD::SRL || Node->getOpcode() == ISD::AND ||
2311*bdd1243dSDimitry Andric           Node->getOpcode() == ISD::OR || Node->getOpcode() == ISD::XOR ||
2312349cc55cSDimitry Andric           Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
2313*bdd1243dSDimitry Andric           isa<ConstantSDNode>(Node) || Depth != 0) &&
2314349cc55cSDimitry Andric          "Unexpected opcode");
2315349cc55cSDimitry Andric 
2316*bdd1243dSDimitry Andric   if (Depth >= SelectionDAG::MaxRecursionDepth)
2317*bdd1243dSDimitry Andric     return false;
2318*bdd1243dSDimitry Andric 
2319349cc55cSDimitry Andric   for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
2320349cc55cSDimitry Andric     SDNode *User = *UI;
2321349cc55cSDimitry Andric     // Users of this node should have already been instruction selected
2322349cc55cSDimitry Andric     if (!User->isMachineOpcode())
2323349cc55cSDimitry Andric       return false;
2324349cc55cSDimitry Andric 
2325349cc55cSDimitry Andric     // TODO: Add more opcodes?
2326349cc55cSDimitry Andric     switch (User->getMachineOpcode()) {
2327349cc55cSDimitry Andric     default:
2328349cc55cSDimitry Andric       return false;
2329349cc55cSDimitry Andric     case RISCV::ADDW:
2330349cc55cSDimitry Andric     case RISCV::ADDIW:
2331349cc55cSDimitry Andric     case RISCV::SUBW:
2332349cc55cSDimitry Andric     case RISCV::MULW:
2333349cc55cSDimitry Andric     case RISCV::SLLW:
2334349cc55cSDimitry Andric     case RISCV::SLLIW:
2335349cc55cSDimitry Andric     case RISCV::SRAW:
2336349cc55cSDimitry Andric     case RISCV::SRAIW:
2337349cc55cSDimitry Andric     case RISCV::SRLW:
2338349cc55cSDimitry Andric     case RISCV::SRLIW:
2339349cc55cSDimitry Andric     case RISCV::DIVW:
2340349cc55cSDimitry Andric     case RISCV::DIVUW:
2341349cc55cSDimitry Andric     case RISCV::REMW:
2342349cc55cSDimitry Andric     case RISCV::REMUW:
2343349cc55cSDimitry Andric     case RISCV::ROLW:
2344349cc55cSDimitry Andric     case RISCV::RORW:
2345349cc55cSDimitry Andric     case RISCV::RORIW:
2346349cc55cSDimitry Andric     case RISCV::CLZW:
2347349cc55cSDimitry Andric     case RISCV::CTZW:
2348349cc55cSDimitry Andric     case RISCV::CPOPW:
23491fd87a68SDimitry Andric     case RISCV::SLLI_UW:
235081ad6265SDimitry Andric     case RISCV::FMV_W_X:
2351349cc55cSDimitry Andric     case RISCV::FCVT_H_W:
2352349cc55cSDimitry Andric     case RISCV::FCVT_H_WU:
2353349cc55cSDimitry Andric     case RISCV::FCVT_S_W:
2354349cc55cSDimitry Andric     case RISCV::FCVT_S_WU:
2355349cc55cSDimitry Andric     case RISCV::FCVT_D_W:
2356349cc55cSDimitry Andric     case RISCV::FCVT_D_WU:
2357349cc55cSDimitry Andric       if (Bits < 32)
2358349cc55cSDimitry Andric         return false;
2359349cc55cSDimitry Andric       break;
2360*bdd1243dSDimitry Andric     case RISCV::SLL:
2361*bdd1243dSDimitry Andric     case RISCV::SRA:
2362*bdd1243dSDimitry Andric     case RISCV::SRL:
2363*bdd1243dSDimitry Andric     case RISCV::ROL:
2364*bdd1243dSDimitry Andric     case RISCV::ROR:
2365*bdd1243dSDimitry Andric     case RISCV::BSET:
2366*bdd1243dSDimitry Andric     case RISCV::BCLR:
2367*bdd1243dSDimitry Andric     case RISCV::BINV:
2368*bdd1243dSDimitry Andric       // Shift amount operands only use log2(Xlen) bits.
2369*bdd1243dSDimitry Andric       if (UI.getOperandNo() != 1 || Bits < Log2_32(Subtarget->getXLen()))
2370*bdd1243dSDimitry Andric         return false;
2371*bdd1243dSDimitry Andric       break;
2372349cc55cSDimitry Andric     case RISCV::SLLI:
2373349cc55cSDimitry Andric       // SLLI only uses the lower (XLen - ShAmt) bits.
2374349cc55cSDimitry Andric       if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
2375349cc55cSDimitry Andric         return false;
2376349cc55cSDimitry Andric       break;
237704eeddc0SDimitry Andric     case RISCV::ANDI:
2378*bdd1243dSDimitry Andric       if (Bits >= (unsigned)llvm::bit_width(User->getConstantOperandVal(1)))
237904eeddc0SDimitry Andric         break;
2380*bdd1243dSDimitry Andric       goto RecCheck;
2381*bdd1243dSDimitry Andric     case RISCV::ORI: {
2382*bdd1243dSDimitry Andric       uint64_t Imm = cast<ConstantSDNode>(User->getOperand(1))->getSExtValue();
2383*bdd1243dSDimitry Andric       if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
2384*bdd1243dSDimitry Andric         break;
2385*bdd1243dSDimitry Andric       [[fallthrough]];
2386*bdd1243dSDimitry Andric     }
2387*bdd1243dSDimitry Andric     case RISCV::AND:
2388*bdd1243dSDimitry Andric     case RISCV::OR:
2389*bdd1243dSDimitry Andric     case RISCV::XOR:
2390*bdd1243dSDimitry Andric     case RISCV::XORI:
2391*bdd1243dSDimitry Andric     case RISCV::ANDN:
2392*bdd1243dSDimitry Andric     case RISCV::ORN:
2393*bdd1243dSDimitry Andric     case RISCV::XNOR:
2394*bdd1243dSDimitry Andric     case RISCV::SH1ADD:
2395*bdd1243dSDimitry Andric     case RISCV::SH2ADD:
2396*bdd1243dSDimitry Andric     case RISCV::SH3ADD:
2397*bdd1243dSDimitry Andric     RecCheck:
2398*bdd1243dSDimitry Andric       if (hasAllNBitUsers(User, Bits, Depth + 1))
2399*bdd1243dSDimitry Andric         break;
2400*bdd1243dSDimitry Andric       return false;
2401*bdd1243dSDimitry Andric     case RISCV::SRLI: {
2402*bdd1243dSDimitry Andric       unsigned ShAmt = User->getConstantOperandVal(1);
2403*bdd1243dSDimitry Andric       // If we are shifting right by less than Bits, and users don't demand any
2404*bdd1243dSDimitry Andric       // bits that were shifted into [Bits-1:0], then we can consider this as an
2405*bdd1243dSDimitry Andric       // N-Bit user.
2406*bdd1243dSDimitry Andric       if (Bits > ShAmt && hasAllNBitUsers(User, Bits - ShAmt, Depth + 1))
2407*bdd1243dSDimitry Andric         break;
2408*bdd1243dSDimitry Andric       return false;
2409*bdd1243dSDimitry Andric     }
24101fd87a68SDimitry Andric     case RISCV::SEXT_B:
2411*bdd1243dSDimitry Andric     case RISCV::PACKH:
241204eeddc0SDimitry Andric       if (Bits < 8)
241304eeddc0SDimitry Andric         return false;
241404eeddc0SDimitry Andric       break;
24151fd87a68SDimitry Andric     case RISCV::SEXT_H:
241681ad6265SDimitry Andric     case RISCV::FMV_H_X:
24171fd87a68SDimitry Andric     case RISCV::ZEXT_H_RV32:
24181fd87a68SDimitry Andric     case RISCV::ZEXT_H_RV64:
2419*bdd1243dSDimitry Andric     case RISCV::PACKW:
242004eeddc0SDimitry Andric       if (Bits < 16)
242104eeddc0SDimitry Andric         return false;
242204eeddc0SDimitry Andric       break;
2423*bdd1243dSDimitry Andric     case RISCV::PACK:
2424*bdd1243dSDimitry Andric       if (Bits < (Subtarget->getXLen() / 2))
2425*bdd1243dSDimitry Andric         return false;
2426*bdd1243dSDimitry Andric       break;
24271fd87a68SDimitry Andric     case RISCV::ADD_UW:
24281fd87a68SDimitry Andric     case RISCV::SH1ADD_UW:
24291fd87a68SDimitry Andric     case RISCV::SH2ADD_UW:
24301fd87a68SDimitry Andric     case RISCV::SH3ADD_UW:
2431349cc55cSDimitry Andric       // The first operand to add.uw/shXadd.uw is implicitly zero extended from
2432349cc55cSDimitry Andric       // 32 bits.
2433349cc55cSDimitry Andric       if (UI.getOperandNo() != 0 || Bits < 32)
2434349cc55cSDimitry Andric         return false;
2435349cc55cSDimitry Andric       break;
2436349cc55cSDimitry Andric     case RISCV::SB:
2437349cc55cSDimitry Andric       if (UI.getOperandNo() != 0 || Bits < 8)
2438349cc55cSDimitry Andric         return false;
2439349cc55cSDimitry Andric       break;
2440349cc55cSDimitry Andric     case RISCV::SH:
2441349cc55cSDimitry Andric       if (UI.getOperandNo() != 0 || Bits < 16)
2442349cc55cSDimitry Andric         return false;
2443349cc55cSDimitry Andric       break;
2444349cc55cSDimitry Andric     case RISCV::SW:
2445349cc55cSDimitry Andric       if (UI.getOperandNo() != 0 || Bits < 32)
2446349cc55cSDimitry Andric         return false;
2447349cc55cSDimitry Andric       break;
2448349cc55cSDimitry Andric     }
2449349cc55cSDimitry Andric   }
2450349cc55cSDimitry Andric 
2451349cc55cSDimitry Andric   return true;
2452349cc55cSDimitry Andric }
2453349cc55cSDimitry Andric 
2454fe6060f1SDimitry Andric // Select VL as a 5-bit immediate or a value that will become a register. This
2455fe6060f1SDimitry Andric // allows us to choose between VSETIVLI and VSETVLI later.
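// For example, a constant VL of 8 is kept as an immediate (a VSETIVLI
// candidate), while X0 or an all-ones constant is rewritten to the VLMax
// sentinel and anything else is passed through as a register operand.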
2456d409305fSDimitry Andric bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
2457d409305fSDimitry Andric   auto *C = dyn_cast<ConstantSDNode>(N);
245881ad6265SDimitry Andric   if (C && isUInt<5>(C->getZExtValue())) {
2459fe6060f1SDimitry Andric     VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
2460fe6060f1SDimitry Andric                                    N->getValueType(0));
246181ad6265SDimitry Andric   } else if (C && C->isAllOnesValue()) {
246281ad6265SDimitry Andric     // Treat all ones as VLMax.
246381ad6265SDimitry Andric     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
246481ad6265SDimitry Andric                                    N->getValueType(0));
246581ad6265SDimitry Andric   } else if (isa<RegisterSDNode>(N) &&
246681ad6265SDimitry Andric              cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
246781ad6265SDimitry Andric     // All our VL operands use an operand that allows GPRNoX0 or an immediate
246881ad6265SDimitry Andric     // as the register class. Convert X0 to a special immediate to pass the
246981ad6265SDimitry Andric     // MachineVerifier. This is recognized specially by the vsetvli insertion
247081ad6265SDimitry Andric     // pass.
247181ad6265SDimitry Andric     VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
247281ad6265SDimitry Andric                                    N->getValueType(0));
247381ad6265SDimitry Andric   } else {
2474d409305fSDimitry Andric     VL = N;
247581ad6265SDimitry Andric   }
2476d409305fSDimitry Andric 
2477d409305fSDimitry Andric   return true;
2478d409305fSDimitry Andric }
2479d409305fSDimitry Andric 
2480e8d8bef9SDimitry Andric bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
248181ad6265SDimitry Andric   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
2482e8d8bef9SDimitry Andric     return false;
2483*bdd1243dSDimitry Andric   assert(N.getNumOperands() == 3 && "Unexpected number of operands");
248481ad6265SDimitry Andric   SplatVal = N.getOperand(1);
2485979e22ffSDimitry Andric   return true;
2486979e22ffSDimitry Andric }
2487e8d8bef9SDimitry Andric 
2488fe6060f1SDimitry Andric using ValidateFn = bool (*)(int64_t);
2489fe6060f1SDimitry Andric 
2490fe6060f1SDimitry Andric static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
2491fe6060f1SDimitry Andric                                    SelectionDAG &DAG,
2492fe6060f1SDimitry Andric                                    const RISCVSubtarget &Subtarget,
2493fe6060f1SDimitry Andric                                    ValidateFn ValidateImm) {
249481ad6265SDimitry Andric   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
249581ad6265SDimitry Andric       !isa<ConstantSDNode>(N.getOperand(1)))
2496979e22ffSDimitry Andric     return false;
2497*bdd1243dSDimitry Andric   assert(N.getNumOperands() == 3 && "Unexpected number of operands");
2498e8d8bef9SDimitry Andric 
249981ad6265SDimitry Andric   int64_t SplatImm =
250081ad6265SDimitry Andric       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2501e8d8bef9SDimitry Andric 
250281ad6265SDimitry Andric   // The semantics of RISCVISD::VMV_V_X_VL is that when the operand
250381ad6265SDimitry Andric   // type is wider than the resulting vector element type, an implicit
250481ad6265SDimitry Andric   // truncation first takes place. Therefore, perform a manual
250581ad6265SDimitry Andric   // truncation/sign-extension in order to ignore any truncated bits and catch
250681ad6265SDimitry Andric   // any zero-extended immediate.
2507e8d8bef9SDimitry Andric   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
2508e8d8bef9SDimitry Andric   // sign-extending to (XLenVT -1).
2509fe6060f1SDimitry Andric   MVT XLenVT = Subtarget.getXLenVT();
251081ad6265SDimitry Andric   assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
2511e8d8bef9SDimitry Andric          "Unexpected splat operand type");
2512fe6060f1SDimitry Andric   MVT EltVT = N.getSimpleValueType().getVectorElementType();
2513fe6060f1SDimitry Andric   if (EltVT.bitsLT(XLenVT))
2514e8d8bef9SDimitry Andric     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
2515979e22ffSDimitry Andric 
2516fe6060f1SDimitry Andric   if (!ValidateImm(SplatImm))
2517e8d8bef9SDimitry Andric     return false;
2518979e22ffSDimitry Andric 
2519fe6060f1SDimitry Andric   SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
2520979e22ffSDimitry Andric   return true;
2521979e22ffSDimitry Andric }
2522e8d8bef9SDimitry Andric 
2523fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
2524fe6060f1SDimitry Andric   return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
2525fe6060f1SDimitry Andric                                 [](int64_t Imm) { return isInt<5>(Imm); });
2526fe6060f1SDimitry Andric }
2527fe6060f1SDimitry Andric 
2528fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
2529fe6060f1SDimitry Andric   return selectVSplatSimmHelper(
2530fe6060f1SDimitry Andric       N, SplatVal, *CurDAG, *Subtarget,
2531fe6060f1SDimitry Andric       [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
2532fe6060f1SDimitry Andric }
2533fe6060f1SDimitry Andric 
2534fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
2535fe6060f1SDimitry Andric                                                       SDValue &SplatVal) {
2536fe6060f1SDimitry Andric   return selectVSplatSimmHelper(
2537fe6060f1SDimitry Andric       N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
2538fe6060f1SDimitry Andric         return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
2539fe6060f1SDimitry Andric       });
2540fe6060f1SDimitry Andric }
2541fe6060f1SDimitry Andric 
2542e8d8bef9SDimitry Andric bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
254381ad6265SDimitry Andric   if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
254481ad6265SDimitry Andric       !isa<ConstantSDNode>(N.getOperand(1)))
2545979e22ffSDimitry Andric     return false;
2546979e22ffSDimitry Andric 
254781ad6265SDimitry Andric   int64_t SplatImm =
254881ad6265SDimitry Andric       cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
2549979e22ffSDimitry Andric 
2550e8d8bef9SDimitry Andric   if (!isUInt<5>(SplatImm))
2551e8d8bef9SDimitry Andric     return false;
2552e8d8bef9SDimitry Andric 
2553e8d8bef9SDimitry Andric   SplatVal =
2554e8d8bef9SDimitry Andric       CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());
2555e8d8bef9SDimitry Andric 
2556979e22ffSDimitry Andric   return true;
2557979e22ffSDimitry Andric }
2558979e22ffSDimitry Andric 
2559fe6060f1SDimitry Andric bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
2560fe6060f1SDimitry Andric                                        SDValue &Imm) {
2561fe6060f1SDimitry Andric   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
2562fe6060f1SDimitry Andric     int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);
2563fe6060f1SDimitry Andric 
2564fe6060f1SDimitry Andric     if (!isInt<5>(ImmVal))
2565fe6060f1SDimitry Andric       return false;
2566fe6060f1SDimitry Andric 
2567fe6060f1SDimitry Andric     Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
2568fe6060f1SDimitry Andric     return true;
2569fe6060f1SDimitry Andric   }
2570fe6060f1SDimitry Andric 
2571fe6060f1SDimitry Andric   return false;
2572fe6060f1SDimitry Andric }
2573fe6060f1SDimitry Andric 
2574349cc55cSDimitry Andric // Try to remove sext.w if the input is a W instruction or can be made into
2575349cc55cSDimitry Andric // a W instruction cheaply.
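// For example, (ADDIW (ADD rs1, rs2), 0) is rewritten to (ADDW rs1, rs2),
// and (ADDIW (ADDW rs1, rs2), 0) is dropped entirely because ADDW already
// sign-extends its result.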
2576349cc55cSDimitry Andric bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
2577349cc55cSDimitry Andric   // Look for the sext.w pattern, addiw rd, rs1, 0.
2578349cc55cSDimitry Andric   if (N->getMachineOpcode() != RISCV::ADDIW ||
2579349cc55cSDimitry Andric       !isNullConstant(N->getOperand(1)))
2580349cc55cSDimitry Andric     return false;
2581349cc55cSDimitry Andric 
2582349cc55cSDimitry Andric   SDValue N0 = N->getOperand(0);
2583349cc55cSDimitry Andric   if (!N0.isMachineOpcode())
2584349cc55cSDimitry Andric     return false;
2585349cc55cSDimitry Andric 
2586349cc55cSDimitry Andric   switch (N0.getMachineOpcode()) {
2587349cc55cSDimitry Andric   default:
2588349cc55cSDimitry Andric     break;
2589349cc55cSDimitry Andric   case RISCV::ADD:
2590349cc55cSDimitry Andric   case RISCV::ADDI:
2591349cc55cSDimitry Andric   case RISCV::SUB:
2592349cc55cSDimitry Andric   case RISCV::MUL:
2593349cc55cSDimitry Andric   case RISCV::SLLI: {
2594349cc55cSDimitry Andric     // Convert sext.w+add/sub/mul to their W instructions. This will create
2595349cc55cSDimitry Andric     // a new independent instruction. This improves latency.
2596349cc55cSDimitry Andric     unsigned Opc;
2597349cc55cSDimitry Andric     switch (N0.getMachineOpcode()) {
2598349cc55cSDimitry Andric     default:
2599349cc55cSDimitry Andric       llvm_unreachable("Unexpected opcode!");
2600349cc55cSDimitry Andric     case RISCV::ADD:  Opc = RISCV::ADDW;  break;
2601349cc55cSDimitry Andric     case RISCV::ADDI: Opc = RISCV::ADDIW; break;
2602349cc55cSDimitry Andric     case RISCV::SUB:  Opc = RISCV::SUBW;  break;
2603349cc55cSDimitry Andric     case RISCV::MUL:  Opc = RISCV::MULW;  break;
2604349cc55cSDimitry Andric     case RISCV::SLLI: Opc = RISCV::SLLIW; break;
2605349cc55cSDimitry Andric     }
2606349cc55cSDimitry Andric 
2607349cc55cSDimitry Andric     SDValue N00 = N0.getOperand(0);
2608349cc55cSDimitry Andric     SDValue N01 = N0.getOperand(1);
2609349cc55cSDimitry Andric 
2610349cc55cSDimitry Andric     // Shift amount needs to be uimm5.
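    // (SLLIW encodes only a 5-bit shift amount, so a 64-bit SLLI by 32 or
    // more cannot be narrowed.)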
2611349cc55cSDimitry Andric     if (N0.getMachineOpcode() == RISCV::SLLI &&
2612349cc55cSDimitry Andric         !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
2613349cc55cSDimitry Andric       break;
2614349cc55cSDimitry Andric 
2615349cc55cSDimitry Andric     SDNode *Result =
2616349cc55cSDimitry Andric         CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
2617349cc55cSDimitry Andric                                N00, N01);
2618349cc55cSDimitry Andric     ReplaceUses(N, Result);
2619349cc55cSDimitry Andric     return true;
2620349cc55cSDimitry Andric   }
2621349cc55cSDimitry Andric   case RISCV::ADDW:
2622349cc55cSDimitry Andric   case RISCV::ADDIW:
2623349cc55cSDimitry Andric   case RISCV::SUBW:
2624349cc55cSDimitry Andric   case RISCV::MULW:
2625349cc55cSDimitry Andric   case RISCV::SLLIW:
2626*bdd1243dSDimitry Andric   case RISCV::PACKW:
2627349cc55cSDimitry Andric     // The result is already sign extended; just remove the sext.w.
2628349cc55cSDimitry Andric     // NOTE: We only handle the nodes that are selected with hasAllWUsers.
2629349cc55cSDimitry Andric     ReplaceUses(N, N0.getNode());
2630349cc55cSDimitry Andric     return true;
2631349cc55cSDimitry Andric   }
2632349cc55cSDimitry Andric 
2633349cc55cSDimitry Andric   return false;
26340b57cec5SDimitry Andric }
26350b57cec5SDimitry Andric 
2636*bdd1243dSDimitry Andric // Return true if we can prove that the mask operand of N is an all-ones mask.
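// The expected pattern is, roughly:
//   %allones = PseudoVMSET_M_* ...
//   %glue    = CopyToReg V0, %allones
//   N        = <masked pseudo> ..., V0, ..., %glue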
2637*bdd1243dSDimitry Andric static bool usesAllOnesMask(SDNode *N, unsigned MaskOpIdx) {
263881ad6265SDimitry Andric   // Check that we're using V0 as a mask register.
263981ad6265SDimitry Andric   if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
264081ad6265SDimitry Andric       cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
264181ad6265SDimitry Andric     return false;
264281ad6265SDimitry Andric 
264381ad6265SDimitry Andric   // V0 should be defined by the node glued to N.
264481ad6265SDimitry Andric   const auto *Glued = N->getGluedNode();
264581ad6265SDimitry Andric 
264681ad6265SDimitry Andric   if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
264781ad6265SDimitry Andric     return false;
264881ad6265SDimitry Andric 
264981ad6265SDimitry Andric   // Check that we're defining V0 as a mask register.
265081ad6265SDimitry Andric   if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
265181ad6265SDimitry Andric       cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
265281ad6265SDimitry Andric     return false;
265381ad6265SDimitry Andric 
265481ad6265SDimitry Andric   // Check the instruction defining V0; it needs to be a VMSET pseudo.
265581ad6265SDimitry Andric   SDValue MaskSetter = Glued->getOperand(2);
265681ad6265SDimitry Andric 
265781ad6265SDimitry Andric   const auto IsVMSet = [](unsigned Opc) {
265881ad6265SDimitry Andric     return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
265981ad6265SDimitry Andric            Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
266081ad6265SDimitry Andric            Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
266181ad6265SDimitry Andric            Opc == RISCV::PseudoVMSET_M_B8;
266281ad6265SDimitry Andric   };
266381ad6265SDimitry Andric 
266481ad6265SDimitry Andric   // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
266581ad6265SDimitry Andric   // undefined behaviour if it's the wrong bitwidth, so we could choose to
266681ad6265SDimitry Andric   // assume that it's all-ones? Same applies to its VL.
2667*bdd1243dSDimitry Andric   return MaskSetter->isMachineOpcode() &&
2668*bdd1243dSDimitry Andric          IsVMSet(MaskSetter.getMachineOpcode());
2669*bdd1243dSDimitry Andric }
2670*bdd1243dSDimitry Andric 
2671*bdd1243dSDimitry Andric // Optimize masked RVV pseudo instructions with a known all-ones mask to their
2672*bdd1243dSDimitry Andric // corresponding "unmasked" pseudo versions. The mask we're interested in will
2673*bdd1243dSDimitry Andric // take the form of a V0 physical register operand, with a glued
2674*bdd1243dSDimitry Andric // register-setting instruction.
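// Conceptually (operand lists abbreviated), something like
//   (PseudoVADD_VV_M1_MASK merge, a, b, V0 = all-ones, vl, sew, policy)
// becomes the corresponding unmasked PseudoVADD_VV pseudo (TA or TU form),
// dropping the mask and policy operands and, if tail agnostic, the merge
// operand as well.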
2675*bdd1243dSDimitry Andric bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
2676*bdd1243dSDimitry Andric   const RISCV::RISCVMaskedPseudoInfo *I =
2677*bdd1243dSDimitry Andric       RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
2678*bdd1243dSDimitry Andric   if (!I)
2679*bdd1243dSDimitry Andric     return false;
2680*bdd1243dSDimitry Andric 
2681*bdd1243dSDimitry Andric   unsigned MaskOpIdx = I->MaskOpIdx;
2682*bdd1243dSDimitry Andric 
2683*bdd1243dSDimitry Andric   if (!usesAllOnesMask(N, MaskOpIdx))
268481ad6265SDimitry Andric     return false;
268581ad6265SDimitry Andric 
268681ad6265SDimitry Andric   // Retrieve the tail policy operand index, if any.
2687*bdd1243dSDimitry Andric   std::optional<unsigned> TailPolicyOpIdx;
268881ad6265SDimitry Andric   const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
268981ad6265SDimitry Andric   const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());
269081ad6265SDimitry Andric 
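  // IsTA selects between the unmasked tail-agnostic pseudo (which has no merge
  // operand) and the unmasked TU pseudo (which keeps the merge operand).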
269181ad6265SDimitry Andric   bool IsTA = true;
269281ad6265SDimitry Andric   if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
2693*bdd1243dSDimitry Andric     TailPolicyOpIdx = getVecPolicyOpIdx(N, MaskedMCID);
269481ad6265SDimitry Andric     if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
269581ad6265SDimitry Andric           RISCVII::TAIL_AGNOSTIC)) {
269681ad6265SDimitry Andric       // Keep the true-masked instruction when there is no unmasked TU
269781ad6265SDimitry Andric       // instruction
269881ad6265SDimitry Andric       if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
269981ad6265SDimitry Andric         return false;
270081ad6265SDimitry Andric       // We can't use TA if the tie-operand is not IMPLICIT_DEF
270181ad6265SDimitry Andric       if (!N->getOperand(0).isUndef())
270281ad6265SDimitry Andric         IsTA = false;
270381ad6265SDimitry Andric     }
270481ad6265SDimitry Andric   }
270581ad6265SDimitry Andric 
270681ad6265SDimitry Andric   unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;
270781ad6265SDimitry Andric 
270881ad6265SDimitry Andric   // Check that we're dropping the mask operand and any policy operand
270981ad6265SDimitry Andric   // when we transform to this unmasked pseudo. Additionally, if this instruction
271081ad6265SDimitry Andric   // is tail agnostic, the unmasked instruction should not have a merge op.
271181ad6265SDimitry Andric   uint64_t TSFlags = TII.get(Opc).TSFlags;
271281ad6265SDimitry Andric   assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
271381ad6265SDimitry Andric          RISCVII::hasDummyMaskOp(TSFlags) &&
271481ad6265SDimitry Andric          !RISCVII::hasVecPolicyOp(TSFlags) &&
271581ad6265SDimitry Andric          "Unexpected pseudo to transform to");
271681ad6265SDimitry Andric   (void)TSFlags;
271781ad6265SDimitry Andric 
271881ad6265SDimitry Andric   SmallVector<SDValue, 8> Ops;
271981ad6265SDimitry Andric   // Skip the merge operand at index 0 if IsTA
272081ad6265SDimitry Andric   for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
272181ad6265SDimitry Andric     // Skip the mask, the policy, and the Glue.
272281ad6265SDimitry Andric     SDValue Op = N->getOperand(I);
272381ad6265SDimitry Andric     if (I == MaskOpIdx || I == TailPolicyOpIdx ||
272481ad6265SDimitry Andric         Op.getValueType() == MVT::Glue)
272581ad6265SDimitry Andric       continue;
272681ad6265SDimitry Andric     Ops.push_back(Op);
272781ad6265SDimitry Andric   }
272881ad6265SDimitry Andric 
272981ad6265SDimitry Andric   // Transitively apply any node glued to our new node.
2730*bdd1243dSDimitry Andric   const auto *Glued = N->getGluedNode();
273181ad6265SDimitry Andric   if (auto *TGlued = Glued->getGluedNode())
273281ad6265SDimitry Andric     Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));
273381ad6265SDimitry Andric 
273481ad6265SDimitry Andric   SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
2735*bdd1243dSDimitry Andric   Result->setFlags(N->getFlags());
273681ad6265SDimitry Andric   ReplaceUses(N, Result);
273781ad6265SDimitry Andric 
273881ad6265SDimitry Andric   return true;
273981ad6265SDimitry Andric }
274081ad6265SDimitry Andric 
2741*bdd1243dSDimitry Andric // Try to fold a VMERGE_VVM with an unmasked intrinsic into a masked intrinsic.
2742*bdd1243dSDimitry Andric // For the TU form, the peephole currently only handles the case where the merge
2743*bdd1243dSDimitry Andric // operand is the same as the false operand. E.g. (VMERGE_VVM_M1_TU False,
2744*bdd1243dSDimitry Andric // False, (VADD_M1 ...), ...) -> (VADD_VV_M1_MASK False, ...)
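// Operand layout of the vmerge pseudos handled here (trailing operands such as
// SEW omitted):
//   TA: (PseudoVMERGE_VVM_*    false, true, mask, vl, ...)
//   TU: (PseudoVMERGE_VVM_*_TU merge, false, true, mask, vl, ...)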
2745*bdd1243dSDimitry Andric bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N, bool IsTA) {
2746*bdd1243dSDimitry Andric   unsigned Offset = IsTA ? 0 : 1;
2747*bdd1243dSDimitry Andric   uint64_t Policy = IsTA ? RISCVII::TAIL_AGNOSTIC : /*TUMU*/ 0;
2748*bdd1243dSDimitry Andric 
2749*bdd1243dSDimitry Andric   SDValue False = N->getOperand(0 + Offset);
2750*bdd1243dSDimitry Andric   SDValue True = N->getOperand(1 + Offset);
2751*bdd1243dSDimitry Andric   SDValue Mask = N->getOperand(2 + Offset);
2752*bdd1243dSDimitry Andric   SDValue VL = N->getOperand(3 + Offset);
2753*bdd1243dSDimitry Andric 
2754*bdd1243dSDimitry Andric   assert(True.getResNo() == 0 &&
2755*bdd1243dSDimitry Andric          "Expected True to be the first output of an instruction.");
2756*bdd1243dSDimitry Andric 
2757*bdd1243dSDimitry Andric   // N must be the only user of True.
2758*bdd1243dSDimitry Andric   if (!True.hasOneUse())
2759*bdd1243dSDimitry Andric     return false;
2760*bdd1243dSDimitry Andric 
2761*bdd1243dSDimitry Andric   if (!True.isMachineOpcode())
2762*bdd1243dSDimitry Andric     return false;
2763*bdd1243dSDimitry Andric 
2764*bdd1243dSDimitry Andric   unsigned TrueOpc = True.getMachineOpcode();
2765*bdd1243dSDimitry Andric 
2766*bdd1243dSDimitry Andric   // Skip if True has a merge operand.
2767*bdd1243dSDimitry Andric   // TODO: Deal with True having the same merge operand as N.
2768*bdd1243dSDimitry Andric   if (RISCVII::hasMergeOp(TII->get(TrueOpc).TSFlags))
2769*bdd1243dSDimitry Andric     return false;
2770*bdd1243dSDimitry Andric 
2771*bdd1243dSDimitry Andric   // Skip if True has side effects.
2772*bdd1243dSDimitry Andric   // TODO: Support vleff and vlsegff.
2773*bdd1243dSDimitry Andric   if (TII->get(TrueOpc).hasUnmodeledSideEffects())
2774*bdd1243dSDimitry Andric     return false;
2775*bdd1243dSDimitry Andric 
2776*bdd1243dSDimitry Andric   // For now, only handle True when it is an unmasked intrinsic.
2777*bdd1243dSDimitry Andric   const RISCV::RISCVMaskedPseudoInfo *Info =
2778*bdd1243dSDimitry Andric       RISCV::lookupMaskedIntrinsicByUnmaskedTA(TrueOpc);
2779*bdd1243dSDimitry Andric 
2780*bdd1243dSDimitry Andric   if (!Info)
2781*bdd1243dSDimitry Andric     return false;
2782*bdd1243dSDimitry Andric 
2783*bdd1243dSDimitry Andric   // The last operand of the unmasked intrinsic should be the SEW or the chain.
2784*bdd1243dSDimitry Andric   bool HasChainOp =
2785*bdd1243dSDimitry Andric       True.getOperand(True.getNumOperands() - 1).getValueType() == MVT::Other;
2786*bdd1243dSDimitry Andric 
2787*bdd1243dSDimitry Andric   if (HasChainOp) {
2788*bdd1243dSDimitry Andric     // Avoid creating cycles in the DAG. We must ensure that none of the other
2789*bdd1243dSDimitry Andric     // operands depend on True through its Chain.
2790*bdd1243dSDimitry Andric     SmallVector<const SDNode *, 4> LoopWorklist;
2791*bdd1243dSDimitry Andric     SmallPtrSet<const SDNode *, 16> Visited;
2792*bdd1243dSDimitry Andric     LoopWorklist.push_back(False.getNode());
2793*bdd1243dSDimitry Andric     LoopWorklist.push_back(Mask.getNode());
2794*bdd1243dSDimitry Andric     LoopWorklist.push_back(VL.getNode());
2795*bdd1243dSDimitry Andric     if (SDNode *Glued = N->getGluedNode())
2796*bdd1243dSDimitry Andric       LoopWorklist.push_back(Glued);
2797*bdd1243dSDimitry Andric     if (SDNode::hasPredecessorHelper(True.getNode(), Visited, LoopWorklist))
2798*bdd1243dSDimitry Andric       return false;
2799*bdd1243dSDimitry Andric   }
2800*bdd1243dSDimitry Andric 
2801*bdd1243dSDimitry Andric   // True must have the same VL as N (modulo the VLMAX case checked below).
2802*bdd1243dSDimitry Andric   unsigned TrueVLIndex = True.getNumOperands() - HasChainOp - 2;
2803*bdd1243dSDimitry Andric   SDValue TrueVL = True.getOperand(TrueVLIndex);
2804*bdd1243dSDimitry Andric 
2805*bdd1243dSDimitry Andric   auto IsNoFPExcept = [this](SDValue N) {
2806*bdd1243dSDimitry Andric     return !this->mayRaiseFPException(N.getNode()) ||
2807*bdd1243dSDimitry Andric            N->getFlags().hasNoFPExcept();
2808*bdd1243dSDimitry Andric   };
2809*bdd1243dSDimitry Andric 
2810*bdd1243dSDimitry Andric   // Allow the peephole when True cannot raise an FP exception and uses the
2811*bdd1243dSDimitry Andric   // VLMAX vector length, since all values past N's VL are determined by N's
2812*bdd1243dSDimitry Andric   // merge operand or tail policy anyway. VLMAX is lowered to an all-ones XLenVT constant.
2813*bdd1243dSDimitry Andric   if (TrueVL != VL && !(IsNoFPExcept(True) && isAllOnesConstant(TrueVL)))
2814*bdd1243dSDimitry Andric     return false;
2815*bdd1243dSDimitry Andric 
2816*bdd1243dSDimitry Andric   SDLoc DL(N);
2817*bdd1243dSDimitry Andric   unsigned MaskedOpc = Info->MaskedPseudo;
2818*bdd1243dSDimitry Andric   assert(RISCVII::hasVecPolicyOp(TII->get(MaskedOpc).TSFlags) &&
2819*bdd1243dSDimitry Andric          "Expected the masked instruction to have a policy operand.");
2820*bdd1243dSDimitry Andric   assert(RISCVII::hasMergeOp(TII->get(MaskedOpc).TSFlags) &&
2821*bdd1243dSDimitry Andric          "Expected the masked instruction to have a merge operand.");
2822*bdd1243dSDimitry Andric 
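  // Build the masked operand list: the merge operand (= False), True's source
  // operands, then the mask, VL, SEW and a policy immediate, followed by True's
  // chain and N's glue when present.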
2823*bdd1243dSDimitry Andric   SmallVector<SDValue, 8> Ops;
2824*bdd1243dSDimitry Andric   Ops.push_back(False);
2825*bdd1243dSDimitry Andric   Ops.append(True->op_begin(), True->op_begin() + TrueVLIndex);
2826*bdd1243dSDimitry Andric   Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
2827*bdd1243dSDimitry Andric   Ops.push_back(CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT()));
2828*bdd1243dSDimitry Andric 
2829*bdd1243dSDimitry Andric   // The result node should take the chain operand of True.
2830*bdd1243dSDimitry Andric   if (HasChainOp)
2831*bdd1243dSDimitry Andric     Ops.push_back(True.getOperand(True.getNumOperands() - 1));
2832*bdd1243dSDimitry Andric 
2833*bdd1243dSDimitry Andric   // The result node should take over the glued node of N.
2834*bdd1243dSDimitry Andric   if (N->getGluedNode())
2835*bdd1243dSDimitry Andric     Ops.push_back(N->getOperand(N->getNumOperands() - 1));
2836*bdd1243dSDimitry Andric 
2837*bdd1243dSDimitry Andric   SDNode *Result =
2838*bdd1243dSDimitry Andric       CurDAG->getMachineNode(MaskedOpc, DL, True->getVTList(), Ops);
2839*bdd1243dSDimitry Andric   Result->setFlags(True->getFlags());
2840*bdd1243dSDimitry Andric 
2841*bdd1243dSDimitry Andric   // Replace the vmerge.vvm node with Result.
2842*bdd1243dSDimitry Andric   ReplaceUses(SDValue(N, 0), SDValue(Result, 0));
2843*bdd1243dSDimitry Andric 
2844*bdd1243dSDimitry Andric   // Replace the remaining values of True, e.g. its chain and VL results.
2845*bdd1243dSDimitry Andric   for (unsigned Idx = 1; Idx < True->getNumValues(); ++Idx)
2846*bdd1243dSDimitry Andric     ReplaceUses(True.getValue(Idx), SDValue(Result, Idx));
2847*bdd1243dSDimitry Andric 
2848*bdd1243dSDimitry Andric   // Try to transform Result into an unmasked intrinsic.
2849*bdd1243dSDimitry Andric   doPeepholeMaskedRVV(Result);
2850*bdd1243dSDimitry Andric   return true;
2851*bdd1243dSDimitry Andric }
2852*bdd1243dSDimitry Andric 
2853*bdd1243dSDimitry Andric // Transform (VMERGE_VVM_<LMUL>_TU false, false, true, allones, vl, sew) to
2854*bdd1243dSDimitry Andric // (VADD_VI_<LMUL>_TU false, true, 0, vl, sew). It may decrease uses of VMSET.
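// With an all-ones mask, such a TU vmerge simply copies the first vl elements
// of the true operand over the false operand. A tail-undisturbed VADD_VI with
// immediate 0 performs the same copy without needing the V0 mask operand, so
// the VMSET feeding V0 may become dead.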
2855*bdd1243dSDimitry Andric bool RISCVDAGToDAGISel::performVMergeToVAdd(SDNode *N) {
2856*bdd1243dSDimitry Andric   unsigned NewOpc;
2857*bdd1243dSDimitry Andric   switch (N->getMachineOpcode()) {
2858*bdd1243dSDimitry Andric   default:
2859*bdd1243dSDimitry Andric     llvm_unreachable("Expected VMERGE_VVM_<LMUL>_TU instruction.");
2860*bdd1243dSDimitry Andric   case RISCV::PseudoVMERGE_VVM_MF8_TU:
2861*bdd1243dSDimitry Andric     NewOpc = RISCV::PseudoVADD_VI_MF8_TU;
2862*bdd1243dSDimitry Andric     break;
2863*bdd1243dSDimitry Andric   case RISCV::PseudoVMERGE_VVM_MF4_TU:
2864*bdd1243dSDimitry Andric     NewOpc = RISCV::PseudoVADD_VI_MF4_TU;
2865*bdd1243dSDimitry Andric     break;
2866*bdd1243dSDimitry Andric   case RISCV::PseudoVMERGE_VVM_MF2_TU:
2867*bdd1243dSDimitry Andric     NewOpc = RISCV::PseudoVADD_VI_MF2_TU;
2868*bdd1243dSDimitry Andric     break;
2869*bdd1243dSDimitry Andric   case RISCV::PseudoVMERGE_VVM_M1_TU:
2870*bdd1243dSDimitry Andric     NewOpc = RISCV::PseudoVADD_VI_M1_TU;
2871*bdd1243dSDimitry Andric     break;
2872*bdd1243dSDimitry Andric   case RISCV::PseudoVMERGE_VVM_M2_TU:
2873*bdd1243dSDimitry Andric     NewOpc = RISCV::PseudoVADD_VI_M2_TU;
2874*bdd1243dSDimitry Andric     break;
2875*bdd1243dSDimitry Andric   case RISCV::PseudoVMERGE_VVM_M4_TU:
2876*bdd1243dSDimitry Andric     NewOpc = RISCV::PseudoVADD_VI_M4_TU;
2877*bdd1243dSDimitry Andric     break;
2878*bdd1243dSDimitry Andric   case RISCV::PseudoVMERGE_VVM_M8_TU:
2879*bdd1243dSDimitry Andric     NewOpc = RISCV::PseudoVADD_VI_M8_TU;
2880*bdd1243dSDimitry Andric     break;
2881*bdd1243dSDimitry Andric   }
2882*bdd1243dSDimitry Andric 
2883*bdd1243dSDimitry Andric   if (!usesAllOnesMask(N, /* MaskOpIdx */ 3))
2884*bdd1243dSDimitry Andric     return false;
2885*bdd1243dSDimitry Andric 
2886*bdd1243dSDimitry Andric   SDLoc DL(N);
2887*bdd1243dSDimitry Andric   EVT VT = N->getValueType(0);
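  // New operands: passthru = the false operand (which equals the merge
  // operand), source = the true operand, immediate 0, then the original VL and
  // SEW operands.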
2888*bdd1243dSDimitry Andric   SDValue Ops[] = {N->getOperand(1), N->getOperand(2),
2889*bdd1243dSDimitry Andric                    CurDAG->getTargetConstant(0, DL, Subtarget->getXLenVT()),
2890*bdd1243dSDimitry Andric                    N->getOperand(4), N->getOperand(5)};
2891*bdd1243dSDimitry Andric   SDNode *Result = CurDAG->getMachineNode(NewOpc, DL, VT, Ops);
2892*bdd1243dSDimitry Andric   ReplaceUses(N, Result);
2893*bdd1243dSDimitry Andric   return true;
2894*bdd1243dSDimitry Andric }
2895*bdd1243dSDimitry Andric 
2896*bdd1243dSDimitry Andric bool RISCVDAGToDAGISel::doPeepholeMergeVVMFold() {
2897*bdd1243dSDimitry Andric   bool MadeChange = false;
2898*bdd1243dSDimitry Andric   SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
2899*bdd1243dSDimitry Andric 
2900*bdd1243dSDimitry Andric   while (Position != CurDAG->allnodes_begin()) {
2901*bdd1243dSDimitry Andric     SDNode *N = &*--Position;
2902*bdd1243dSDimitry Andric     if (N->use_empty() || !N->isMachineOpcode())
2903*bdd1243dSDimitry Andric       continue;
2904*bdd1243dSDimitry Andric 
2905*bdd1243dSDimitry Andric     auto IsVMergeTU = [](unsigned Opcode) {
2906*bdd1243dSDimitry Andric       return Opcode == RISCV::PseudoVMERGE_VVM_MF8_TU ||
2907*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_MF4_TU ||
2908*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_MF2_TU ||
2909*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_M1_TU ||
2910*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_M2_TU ||
2911*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_M4_TU ||
2912*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_M8_TU;
2913*bdd1243dSDimitry Andric     };
2914*bdd1243dSDimitry Andric 
2915*bdd1243dSDimitry Andric     auto IsVMergeTA = [](unsigned Opcode) {
2916*bdd1243dSDimitry Andric       return Opcode == RISCV::PseudoVMERGE_VVM_MF8 ||
2917*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_MF4 ||
2918*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_MF2 ||
2919*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_M1 ||
2920*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_M2 ||
2921*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_M4 ||
2922*bdd1243dSDimitry Andric              Opcode == RISCV::PseudoVMERGE_VVM_M8;
2923*bdd1243dSDimitry Andric     };
2924*bdd1243dSDimitry Andric 
2925*bdd1243dSDimitry Andric     unsigned Opc = N->getMachineOpcode();
2926*bdd1243dSDimitry Andric     // The following optimizations require that the merge operand of N is the same
2927*bdd1243dSDimitry Andric     // as the false operand of N.
2928*bdd1243dSDimitry Andric     if ((IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1)) ||
2929*bdd1243dSDimitry Andric         IsVMergeTA(Opc))
2930*bdd1243dSDimitry Andric       MadeChange |= performCombineVMergeAndVOps(N, IsVMergeTA(Opc));
2931*bdd1243dSDimitry Andric     if (IsVMergeTU(Opc) && N->getOperand(0) == N->getOperand(1))
2932*bdd1243dSDimitry Andric       MadeChange |= performVMergeToVAdd(N);
2933*bdd1243dSDimitry Andric   }
2934*bdd1243dSDimitry Andric   return MadeChange;
2935*bdd1243dSDimitry Andric }
2936*bdd1243dSDimitry Andric 
29370b57cec5SDimitry Andric // This pass converts a legalized DAG into a RISCV-specific DAG, ready
29380b57cec5SDimitry Andric // for instruction scheduling.
293981ad6265SDimitry Andric FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
294081ad6265SDimitry Andric                                        CodeGenOpt::Level OptLevel) {
294181ad6265SDimitry Andric   return new RISCVDAGToDAGISel(TM, OptLevel);
29420b57cec5SDimitry Andric }
2943*bdd1243dSDimitry Andric 
2944*bdd1243dSDimitry Andric char RISCVDAGToDAGISel::ID = 0;
2945*bdd1243dSDimitry Andric 
2946*bdd1243dSDimitry Andric INITIALIZE_PASS(RISCVDAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)
2947