Lines Matching +full:1 +full:fa4
1546 // Don't allow VF=1 if those types aren't legal.
1627 return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ false,
1630 return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ true,
1656 return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
1662 return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
1672 return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
1681 return SetRVVLoadStoreInfo(/*PtrOp*/ 1,
1856 case 1:
1956 ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
2033 // ((1 >> Y) & 1) ==/!= 0
2036 // Is this '((1 >> Y) & 1)'?
2040 // Will this be '((1 >> Y) & 1)' after the transform?
2074 return Operand == 1;
2095 return Operand == 0 || Operand == 1;
2107 return Operand == 1;
2134 return Operand == 0 || Operand == 1;
2173 if (cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(1))
2222 // (1) `{0-31 value, false}` if FLI is available for Imm's type and FP value.
2227 // (3) `{-1, _}` if there is no way FLI can be used to materialize Imm.
2231 return std::make_pair(-1, false);
2244 return std::make_pair(-1, false);
2286 1 + RISCVMatInt::getIntMatCost(Imm.bitcastToAPInt(), Subtarget.getXLen(),
2363 return 1;
2386 // with 1/-1.
2393 isa<ConstantSDNode>(LHS.getOperand(1))) {
2394 uint64_t Mask = LHS.getConstantOperandVal(1);
2399 ShAmt = LHS.getValueSizeInBits() - 1 - Log2_64(Mask);
2417 // Convert X > -1 to X >= 0.
2418 if (C == -1) {
2425 // Convert X < 1 to 0 >= X.
2426 if (C == 1) {
2509 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
2538 // LMUL>1 register class down to the outgoing one. At each step we half
2598 unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
2603 return Op.getOperand(II->VLOperand + 1 + HasChain);
2705 // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
2853 // TODO: Here assume reciprocal throughput is 1 for LMUL_1, it is
2865 Cost = LMul <= DLenFactor ? (DLenFactor / LMul) : 1;
2913 EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2959 // FIXME: Don't support narrowing by more than 1 step for now.
2982 // Need to widen by more than 1 step, promote the FP type, then do a widening
3057 Mask = Op.getOperand(1);
3078 MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
3154 SDValue Src = Op.getOperand(1);
3173 Chain = Unorder.getValue(1);
3177 Chain = Src.getValue(1);
3188 MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
3230 Chain = Truncated.getValue(1);
3237 Chain = Truncated.getValue(1);
3268 MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
3360 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
3365 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
3415 // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
3421 // Normalize the step if it's greater than 1.
3427 IdxDiff = 1;
3482 SDValue Idx = SplatVal.getOperand(1);
3578 if (SDValue LastOp = Op->getOperand(Op->getNumOperands() - 1);
3579 !LastOp.isUndef() && ValueCounts[LastOp] == 1 &&
3597 if (ValueCounts[V] == 1) {
3734 if (StepNumerator != 1 && StepNumerator != INT64_MIN &&
3748 (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
3759 if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
3764 if (StepDenominator != 1) {
3794 (Subtarget.getRealMinVLen() >= VT.getSizeInBits() * NumElts) ? NumElts : 1;
3816 if (ViaVecLen != 1)
3818 MVT::getVectorVT(ViaIntVT, 1), Vec,
3826 // v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
3938 /// trades three scalar operations for 1 vector operation. Scalar
3991 NewOperands.push_back(pack(Op.getOperand(i), Op.getOperand(i + 1)));
4043 DAG.getConstant(1, DL, Splat.getValueType()));
4048 SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
4158 InstructionCost PerSlideCost = 1;
4277 isa<ConstantSDNode>(Hi.getOperand(1)) &&
4278 Hi.getConstantOperandVal(1) == 31)
4353 isNullConstant(Scalar.getOperand(1))) {
4383 DAG.getConstant(1, DL, XLenVT),
4402 // b) t35: v8i8 = vector_shuffle<1,3,5,7,9,11,13,15> t34, t33
4426 if (V1.getConstantOperandVal(1) != 0 ||
4427 V2.getConstantOperandVal(1) != Mask.size())
4431 if (Mask[0] != 0 && Mask[0] != 1)
4436 for (unsigned i = 1; i != Mask.size(); ++i)
4437 if (Mask[i] != Mask[i - 1] + 2)
4464 OddSrc = StartIndexes[1];
4486 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
4487 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
4488 /// 0 or 1 if a rotation is found.
4497 // [11, 12, 13, 14, 15, 0, 1, 2]
4498 // [-1, 12, 13, 14, -1, -1, 1, -1]
4499 // [-1, -1, -1, -1, -1, -1, 1, 2]
4501 // [-1, 4, 5, 6, -1, -1, 9, -1]
4502 // [-1, 4, 5, 6, -1, -1, -1, -1]
4504 LoSrc = -1;
4505 HiSrc = -1;
4515 return -1;
4526 return -1;
4529 int MaskSrc = M < Size ? 0 : 1;
4543 return -1;
4610 // t108: v8i8 = vector_shuffle<1,2,3,4,5,6,7,8> t49, t106
4617 // t54: v4i16 = vector_shuffle<1,2,3,4> t29, t30
4630 Offset += Parent.getConstantOperandVal(1);
4647 if (NewMask[i] == -1)
4665 for (unsigned i = 1; i != NewMask.size(); ++i)
4666 if (NewMask[i - 1] + 1 != NewMask[i])
4686 // vector_shuffle v8:v8i8, v9:v8i8, <0, 1, 2, 3, 8, 9, 10, 11>
4691 // vector_shuffle v8:v8i8, v9:v8i8 <0, 1, 8, 9, 10, 5, 6, 7>
4738 /// N-1 elements to make room for an inserted scalar at one end.
4755 // Return true if the mask could describe a slide of Mask.size() - 1
4767 bool IsVSlidedown = isSlideMask(Mask, OpsSwapped ? 0 : NumElts, 1);
4768 if (!IsVSlidedown && !isSlideMask(Mask, OpsSwapped ? 0 : NumElts, -1))
4771 const int InsertIdx = Mask[IsVSlidedown ? (NumElts - 1) : 0];
4859 // Then get OddV * by 2^(VecVT.getScalarSizeInBits() - 1)
4905 !SVN->getOperand(1).isUndef())
4910 *DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), ViaEltSize), 1);
4965 // Given a shuffle mask like <3, 0, 1, 2, 7, 4, 5, 6> for v8i8, we can
5002 SDValue V2 = SVN->getOperand(1);
5024 OutMasks(VRegsPerSrc, {-1, {}});
5026 // Check if our mask can be done as a 1-to-1 mapping from source
5037 if (OutMasks[DstVecIdx].first == -1)
5045 OutMasks[DstVecIdx].second.resize(ElemsPerVReg, -1);
5062 if (SrcVecIdx == -1)
5081 SDValue V2 = Op.getOperand(1);
5261 // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
5316 if (IndexVT.getScalarType().bitsGT(MVT::i16) && isUInt<16>(NumElts - 1) &&
5317 (IndexVT.getSizeInBits() / Subtarget.getRealMinVLen()) > 1) {
5350 ? MaskIndex : -1);
5351 ShuffleMaskRHS.push_back(IsLHSOrUndefIndex ? -1 : (MaskIndex - NumElts));
5413 Mask = Op.getOperand(1);
5500 // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
5501 unsigned Adjust = ExponentBias + (EltSize - 1);
5525 SDValue Mask = Op->getOperand(1);
5547 // In this case, we can interpret poison as -1, so nothing further to do.
5550 // Convert -1 to VL.
5583 return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
5661 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
5694 SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
5716 SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
5725 if (isa<ConstantSDNode>(Op.getOperand(1)))
5731 SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
5737 SDValue Ovf = DAG.getSetCC(DL, Op.getValue(1).getValueType(), WideOp, SExt,
5748 SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
5753 SDValue Ovf = DAG.getSetCC(DL, Op.getValue(1).getValueType(), Mul, SExt,
5763 unsigned Check = Op.getConstantOperandVal(1);
5866 SDValue Y = Op.getOperand(1);
6196 auto [Lo, Hi] = DAG.SplitVector(Op.getOperand(1), DL);
6199 DAG.SplitEVL(Op.getOperand(3), Op.getOperand(1).getValueType(), DL);
6214 SDVTList LoVTs = DAG.getVTList(LoVT, Op->getValueType(1));
6215 SDVTList HiVTs = DAG.getVTList(HiVT, Op->getValueType(1));
6234 HiOperands[0] = LoRes.getValue(1);
6240 return DAG.getMergeValues({V, HiRes.getValue(1)}, DL);
6293 if (!isa<ConstantSDNode>(Op.getOperand(1)))
6350 EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
6363 EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6402 return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
6407 return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
6440 // We define our scalable vector types for lmul=1 to use a 64 bit known
6474 Op.getOperand(1).getValueType() == MVT::i32) {
6478 DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
6628 SDValue Chain = Int2FP.getValue(1);
6644 return DAG.getMergeValues({Res, FP2Int.getValue(1)}, DL);
6698 return DAG.getMergeValues({SubVec, Src.getValue(1)}, DL);
6805 if (Op.getOperand(1).getValueType() == MVT::nxv32f16 &&
6813 if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
6917 if (Op.getOperand(1).getValueType().isFixedLengthVector())
6931 SDValue Tmp2 = Op.getOperand(1);
6949 SDValue RHS = Op.getOperand(1);
6957 // convert this to the equivalent of (set(u)ge X, C+1) by using
6958 // (xori (slti(u) X, C+1), 1). This avoids materializing a small constant
6962 if (Imm != 0 && isInt<12>((uint64_t)Imm + 1)) {
6963 // If this is an unsigned compare and the constant is -1, incrementing
6965 if (CCVal == ISD::SETUGT && Imm == -1)
6970 DL, VT, LHS, DAG.getConstant(Imm + 1, DL, OpVT), CCVal);
7009 assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
7051 SDValue RHS = DAG.getFreeze(Op->getOperand(1));
7201 {WidenVec, Op.getOperand(1), Op.getOperand(2)});
7265 return emitFlushICache(DAG, Op.getOperand(0), Op.getOperand(1),
7540 SDValue RHS2 = Val.getOperand(1);
7562 SDValue TrueV = N->getOperand(1);
7568 // (select c, -1, y) -> -c | y
7573 // (select c, y, -1) -> (c-1) | y
7580 // (select c, 0, y) -> (c-1) & y
7608 SDValue RHS = CondV.getOperand(1);
7631 // being `0` or `-1`. In such cases we can replace `select` with `and`.
7643 SelOpNo = 1;
7644 Sel = BO->getOperand(1);
7650 unsigned ConstSelOpNo = 1;
7654 OtherSelOpNo = 1;
7661 SDValue ConstBinOp = BO->getOperand(SelOpNo ^ 1);
7670 if (SelOpNo == 1)
7671 std::swap(NewConstOps[0], NewConstOps[1]);
7684 if (SelOpNo == 1)
7685 std::swap(NewNonConstOps[0], NewNonConstOps[1]);
7688 SDValue NewT = (ConstSelOpNo == 1) ? NewConstOp : NewNonConstOp;
7689 SDValue NewF = (ConstSelOpNo == 1) ? NewNonConstOp : NewConstOp;
7695 SDValue TrueV = Op.getOperand(1);
7723 (TrueV.getOperand(0) == FalseV || TrueV.getOperand(1) == FalseV))
7729 (FalseV.getOperand(0) == TrueV || FalseV.getOperand(1) == TrueV))
7787 // (select cc, 0.0, 1.0) -> (sint_to_fp (zext (xor cc, 1)))
7795 DAG.getConstant(1, DL, XLenVT));
7820 SDValue RHS = CondV.getOperand(1);
7823 // Special case for a select of 2 constants that have a difference of 1.
7834 if (TrueVal - 1 == FalseVal)
7836 if (TrueVal + 1 == FalseVal)
7841 // 1 < x ? x : 1 -> 0 < x ? x : 1
7852 // x <s -1 ? x : -1 -> x <s 0 ? x : -1
7872 SDValue CondV = Op.getOperand(1);
7879 SDValue RHS = CondV.getOperand(1);
7905 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
7966 SDValue Hi = Op.getOperand(1);
7972 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
7978 SDValue One = DAG.getConstant(1, DL, VT);
7980 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
8005 SDValue Hi = Op.getOperand(1);
8011 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - ShAmt))
8015 // Hi = Hi >>s (XLEN-1)
8019 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - ShAmt))
8028 SDValue One = DAG.getConstant(1, DL, VT);
8030 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
8071 DAG.getConstant(1, DL, SplatVal.getValueType()));
8090 SDValue Hi = Op.getOperand(1);
8107 // Custom-lower extensions from mask vectors by using a vselect either with 1
8108 // for zero/any-extension or -1 for sign-extension:
8109 // (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
8178 // (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
8191 Mask = Op.getOperand(1);
8212 SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
8260 Mask = Op.getOperand(1);
8298 SDValue Src = Op.getOperand(1);
8325 Chain = Src.getValue(1);
8337 Res = DAG.getMergeValues({SubVec, Res.getValue(1)}, DL);
8374 Mask = Op.getOperand(1);
8442 // By limiting the active vector length to index+1 and merging with the
8445 // at VL-1, which is replaced with the desired value.
8451 SDValue Val = Op.getOperand(1);
8592 DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
8597 Idx->getAsZExtVal() + 1 == VecVT.getVectorNumElements())
8617 SDValue Idx = Op.getOperand(1);
8652 WidenVecLen = 1;
8665 DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
8675 DAG.getConstant(1, DL, XLenVT));
8717 MaxIdx = VecVT.getVectorNumElements() - 1;
8747 // Use a VL of 1 to avoid processing more elements than we need.
8748 auto [Mask, VL] = getDefaultVLOps(1, ContainerVT, DL, DAG, Subtarget);
8777 unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
8786 unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
8817 MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
8886 DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
8897 Passthru = DAG.getBitcast(I32VT, Operands[1]);
8919 SDValue MaskedOff = Operands[1];
8921 uint64_t Policy = Operands[NumOps - 1]->getAsZExtVal();
8960 // Determine the VF that corresponds to LMUL 1 for ElementWidth.
8962 // We don't support VF==1 with ELEN==32.
8980 SDValue AVL = DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
8990 SDValue Op0 = N->getOperand(1);
9004 // Convert -1 to VL.
9019 unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
9027 unsigned SplatOp = II->ScalarOperand + 1;
9114 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
9119 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
9128 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
9136 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2),
9143 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
9148 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
9154 return DAG.getNode(RISCVISD::MOPR, DL, XLenVT, Op.getOperand(1),
9161 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
9169 return DAG.getNode(RISCVISD::MOPRR, DL, XLenVT, Op.getOperand(1),
9175 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
9181 return DAG.getNode(RISCVISD::CLMUL, DL, XLenVT, Op.getOperand(1),
9189 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
9202 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
9209 SDValue Res = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Op.getOperand(1));
9214 Op.getOperand(1), DAG.getVectorIdxConstant(0, DL));
9216 return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
9221 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
9228 Op.getOperand(1), Scalar, Op.getOperand(3));
9249 SDValue Vec = Op.getOperand(1);
9253 if (Op.getOperand(1).isUndef())
9271 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
9288 !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
9297 !isValidEGW(8, Op->getOperand(1).getSimpleValueType(), Subtarget))
9309 !isValidEGW(4, Op->getOperand(1).getSimpleValueType(), Subtarget) ||
9361 Operands.erase(Operands.begin() + 1);
9382 SDValue Chain = NewNode.getValue(1);
9397 Operands.erase(Operands.begin() + 1);
9405 unsigned IntNo = Op.getConstantOperandVal(1);
9445 Chain = ScalarLoad.getValue(1);
9451 Chain = ScalarLoad.getValue(1);
9478 Chain = Result.getValue(1);
9497 unsigned NF = Op->getNumValues() - 1;
9559 unsigned IntNo = Op.getConstantOperandVal(1);
9715 SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
9768 // ((vcpop x) & 1) != 0
9769 SDValue One = DAG.getConstant(1, DL, XLenVT);
9788 // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
9797 (ImmAVL && ImmAVL->getZExtValue() >= 1);
9817 auto InnerVL = NonZeroAVL ? VL : DAG.getConstant(1, DL, XLenVT);
9900 return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
9966 SDValue Vec = Op.getOperand(1);
10014 SDValue SubVec = Op.getOperand(1);
10130 // we have a fixed length subvector, we need to adjust the index by 1/vscale.
10155 // 1. If the Idx has been completely eliminated and this subvector's size is
10161 // We do this by lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1
10164 // of that LMUL=1 type back into the larger vector (resolving to another
10166 // LMUL=1 type to avoid allocating a large register group to hold our
10256 unsigned OrigIdx = Op.getConstantOperandVal(1);
10290 Op.getOperand(1));
10316 unsigned LastIdx = OrigIdx + SubVecVT.getVectorNumElements() - 1;
10353 // we have a fixed length subvector, we need to adjust the index by 1/vscale.
10392 // If VecVT has an LMUL > 1, then SubVecVT should have a smaller LMUL, and
10445 if (TruncVals.size() > 1)
10458 // 1 bit element vectors need to be widened to e8
10466 auto [Op1Lo, Op1Hi] = DAG.SplitVectorOperand(Op.getNode(), 1);
10476 SDValue Odd = DAG.getNode(ISD::CONCAT_VECTORS, DL, VecVT, ResLo.getValue(1),
10477 ResHi.getValue(1));
10486 Op.getOperand(0), Op.getOperand(1));
10507 // Create a vector of odd indices {1, 3, 5, ... }
10509 DAG.getNode(ISD::ADD, DL, IdxVT, EvenIdx, DAG.getConstant(1, DL, IdxVT));
10544 auto [Op1Lo, Op1Hi] = DAG.SplitVectorOperand(Op.getNode(), 1);
10553 ResLo.getValue(0), ResLo.getValue(1));
10555 ResHi.getValue(0), ResHi.getValue(1));
10564 Interleaved = getWideningInterleave(Op.getOperand(0), Op.getOperand(1), DL,
10572 Op.getOperand(0), Op.getOperand(1));
10576 // 0 1 2 3 4 5 6 7 ...
10579 // 1 1 1 1 1 1 1 1 ...
10580 SDValue Ones = DAG.getSplatVector(IdxVT, DL, DAG.getConstant(1, DL, XLenVT));
10582 // 1 0 1 0 1 0 1 0 ...
10592 // 0 0 1 1 2 2 3 3 ...
10594 // 0 n 1 n+1 2 n+2 3 n+3 ...
10599 // v[0] v[n] v[1] v[n+1] v[2] v[n+2] v[3] v[n+3] ...
10626 if (StepValImm != 1) {
10643 // subtracting the id of each element from (VLMAX-1). This will convert
10645 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
10698 // Calculate VLMAX-1 for the desired SEW.
10701 DAG.getConstant(1, DL, XLenVT));
10703 // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
10725 SDValue V2 = Op.getOperand(1);
10782 return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
10801 return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
10914 Chain = Result.getValue(1);
10998 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
11017 SDValue Op1 = Op.getOperand(1);
11037 Tmp1.getValue(1), Tmp2.getValue(1));
11050 return DAG.getMergeValues({Res, OEQ.getValue(1)}, DL);
11095 return DAG.getMergeValues({SubVec, Res.getValue(1)}, DL);
11117 Mask = Op->getOperand(1);
11143 SDValue Sign = Op.getOperand(1);
11170 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
11224 return DAG.getMergeValues({SubVec, ScalableRes.getValue(1)}, DL);
11314 Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
11331 SDValue Op2 = Op.getOperand(1);
11361 // X >s Y --> X == 0 & Y == 1 --> ~X & Y
11362 // X <u Y --> X == 0 & Y == 1 --> ~X & Y
11370 // X <s Y --> X == 1 & Y == 0 --> ~Y & X
11371 // X >u Y --> X == 1 & Y == 0 --> ~Y & X
11379 // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
11380 // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
11388 // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
11389 // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
11410 SDValue Mask = Op.getOperand(1);
11437 if (SrcEltSize == 1) {
11444 RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
11497 if (DstEltSize == 1) {
11505 // Compare the integer result to 0. The integer should be 0 or 1/-1,
11544 SDValue Op2 = Op.getOperand(1);
11571 DAG.getConstant(1, DL, XLenVT), EVL1);
11580 DAG.getConstant(1, DL, XLenVT), EVL2);
11626 SDValue Mask = Op.getOperand(1);
11653 SDValue Mask = Op.getOperand(1);
11674 DAG.getConstant(1, DL, XLenVT), EVL);
11745 DAG.getNode(ISD::SUB, DL, XLenVT, EVL, DAG.getConstant(1, DL, XLenVT));
11774 SDValue Op2 = Op->getOperand(1);
11830 SDValue Chain = Result.getValue(1);
11966 Chain = Result.getValue(1);
12096 SDValue RMValue = Op->getOperand(1);
12169 SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
12180 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
12203 SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
12215 Chain = Op0.getValue(1);
12224 Results.push_back(Res.getValue(1));
12316 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
12333 Results.push_back(Res.getValue(1));
12343 SDValue RHS = N->getOperand(1);
12386 if (N->getOperand(1).getOpcode() != ISD::Constant) {
12403 DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
12418 if (!isa<ConstantSDNode>(N->getOperand(1)) &&
12449 if (N->getOperand(1).getOpcode() == ISD::Constant &&
12470 if (!isa<ConstantSDNode>(N->getOperand(1)))
12474 SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
12487 EVT OType = N->getValueType(1);
12504 SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
12512 // Special case uaddo X, 1 overflowed if the addition result is 0.
12517 Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
12520 // Special case uaddo X, -1 overflowed if X != 0.
12521 Overflow = DAG.getSetCC(DL, N->getValueType(1), N->getOperand(0),
12528 Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
12547 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
12626 NewReg.getValue(0), NewReg.getValue(1));
12634 EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
12667 SDValue Idx = N->getOperand(1);
12690 // Use a VL of 1 to avoid processing more elements than we need.
12691 auto [Mask, VL] = getDefaultVLOps(1, ContainerVT, DL, DAG, Subtarget);
12757 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
12767 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
12779 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
12790 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
12804 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
12826 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
12847 Subtarget.getXLenVT(), N->getOperand(1));
12856 SDValue Vec = N->getOperand(1);
12864 auto [Mask, VL] = getDefaultVLOps(1, VecVT, DL, DAG, Subtarget);
12906 Results.push_back(Res.getValue(1));
12977 SDValue RHS = N->getOperand(1);
12986 !isa<ConstantSDNode>(RHS.getOperand(1)))
12989 uint64_t RHSIdx = cast<ConstantSDNode>(RHS.getOperand(1))->getLimitedValue();
12999 // match binop (extract_vector_elt V, 0), (extract_vector_elt V, 1) to
13004 LHS.getOperand(0) == SrcVec && isa<ConstantSDNode>(LHS.getOperand(1))) {
13006 cast<ConstantSDNode>(LHS.getOperand(1))->getLimitedValue();
13007 if (0 == std::min(LHSIdx, RHSIdx) && 1 == std::max(LHSIdx, RHSIdx)) {
13024 isNullConstant(ReduceVec.getOperand(1)) &&
13030 EVT ReduceVT = EVT::getVectorVT(*DAG.getContext(), VT, RHSIdx + 1);
13076 isNullConstant(V.getOperand(1)) &&
13084 else if (IsReduction(N->getOperand(1), Opc))
13085 ReduceIdx = 1;
13103 ScalarV = ScalarV.getOperand(1);
13105 // Make sure that ScalarV is a splat with VL=1.
13116 if (!isNeutralConstant(N->getOpcode(), N->getFlags(), ScalarV.getOperand(1),
13125 SDValue NewStart = N->getOperand(1 - ReduceIdx);
13138 SDValue Ops[] = {Reduce.getOperand(0), Reduce.getOperand(1),
13144 Extract.getOperand(1));
13148 // (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
13162 SDValue N1 = N->getOperand(1);
13168 auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
13169 auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
13180 if (Diff != 1 && Diff != 2 && Diff != 3)
13194 // (and (select cond, -1, c), x)
13195 // -> (select cond, x, (and x, c)) [AllOnes=1]
13240 SDValue TrueVal = Slct.getOperand(1 + OpOffset);
13261 {Slct.getOperand(0), Slct.getOperand(1),
13273 SDValue N1 = N->getOperand(1);
13287 // (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
13288 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
13289 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
13292 // (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
13293 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
13294 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
13310 auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
13311 auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
13322 if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
13329 } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
13330 isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
13331 CA = C1 / C0 + 1;
13333 } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
13334 isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
13335 CA = C1 / C0 - 1;
13366 SDValue N1 = N->getOperand(1);
13400 // Try to turn (add (xor bool, 1) -1) into (neg bool).
13403 SDValue N1 = N->getOperand(1);
13407 // RHS should be -1.
13411 // Look for (xor X, 1).
13412 if (N0.getOpcode() != ISD::XOR || !isOneConstant(N0.getOperand(1)))
13415 // First xor input should be 0 or 1.
13416 APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
13451 SDValue N1 = N->getOperand(1);
13460 // All our optimizations involve subtracting 1 from the immediate and forming
13462 APInt ImmValMinus1 = N0C->getAPIntValue() - 1;
13469 // (add (setcc x, y, neq/eq), constant - 1)
13476 DAG.getSetCC(SDLoc(N1), VT, N1.getOperand(0), N1.getOperand(1), CCVal);
13477 } else if (N1.getOpcode() == ISD::XOR && isOneConstant(N1.getOperand(1)) &&
13479 // (sub C, (xor (setcc), 1)) -> (add (setcc), C-1).
13480 // Since setcc returns a bool the xor is equivalent to 1-setcc.
13502 SDValue N1 = N->getOperand(1);
13507 auto *ShAmtC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
13525 SDValue N1 = N->getOperand(1);
13526 // fold (sub 0, (setcc x, 0, setlt)) -> (sra x, xlen - 1)
13528 isNullConstant(N1.getOperand(1))) {
13532 unsigned ShAmt = N0.getValueSizeInBits() - 1;
13548 // Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.
13553 SDValue N1 = N->getOperand(1);
13562 SDValue N01 = N0.getOperand(1);
13563 SDValue N11 = N1.getOperand(1);
13565 // For AND, SimplifyDemandedBits may have turned one of the (xor X, 1) into
13566 // (xor X, -1) based on the upper bits of the other operand being 0. If the
13567 // operation is And, allow one of the Xors to use -1.
13572 // N01 and N11 being 1 was already handled. Handle N11==1 and N01==-1.
13583 // The LHS of the xors needs to be 0/1.
13584 APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
13592 return DAG.getNode(ISD::XOR, DL, VT, Logic, DAG.getConstant(1, DL, VT));
13614 SDValue True = N0.getOperand(1);
13627 SDValue CondRHS = Cond.getOperand(1);
13636 if (!CondRHSC || CondRHSC->getAPIntValue() != (1ULL << ScalarBits))
13647 ConstantSDNode *FalseRHSC = isConstOrConstSplat(False.getOperand(1));
13661 DAG.getConstant((1ULL << ScalarBits) - 1, DL, SrcVT));
13677 !isa<ConstantSDNode>(N0.getOperand(1)) && N0.hasOneUse()) {
13680 SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
13697 // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
13703 N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) &&
13704 N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) &&
13708 SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
13711 DAG.getConstant(1, DL, MVT::i64));
13724 // fold (and (select lhs, rhs, cc, -1, y), x) ->
13729 // Try to pull an xor with 1 through a select idiom that uses czero_eqz/nez.
13741 SDValue Cond = N0.getOperand(1);
13742 if (Cond != N1.getOperand(1))
13749 TrueV.getOperand(1) != FalseV.getOperand(1) ||
13750 !isOneConstant(TrueV.getOperand(1)) ||
13762 return DAG.getNode(ISD::XOR, DL, VT, NewOr, TrueV.getOperand(1));
13781 SDValue N1 = N->getOperand(1);
13795 SDValue N1 = N->getOperand(1);
13797 // Pre-promote (i32 (xor (shl -1, X), ~0)) on RV64 with Zbs so we can use
13798 // (ADDI (BSET X0, X), -1). If we wait until type legalization, we'll create
13803 !isa<ConstantSDNode>(N0.getOperand(1)) && N0.hasOneUse()) {
13806 SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
13812 // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
13820 DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
13823 // Fold (xor (setcc constant, y, setlt), 1) -> (setcc y, constant + 1, setlt)
13831 if ((Imm + 1).isSignedIntN(12))
13832 return DAG.getSetCC(DL, VT, N0.getOperand(1),
13833 DAG.getConstant(Imm + 1, DL, VT), CC);
13837 // Combine (xor (trunc (X cc Y)) 1) -> (trunc (X !cc Y)). This is needed with
13845 SDValue RHS = N00.getOperand(1);
13884 ConstantSDNode *CNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
13906 if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
13907 X.getConstantOperandVal(1) == UINT64_C(0xffffffff)) {
13911 DAG.getConstant(Log2_64(Divisor - 1), DL, VT),
13918 DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
13928 DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
13930 DAG.getConstant(Log2_64(MulAmt2 - 1), DL, VT),
13938 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
13940 if (ScaleShift >= 1 && ScaleShift < 4) {
13941 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
13950 // 2^(1,2,3) * 3,5,9 + 1 -> (shXadd (shYadd x, x), x)
13953 // (2^(1,2,3) * 3,5,9 + 1) << C2
13954 // 2^(C1>3) * 3,5,9 +/- 1
13956 uint64_t C = MulAmt - 1;
13960 if ((C >> TZ) == Divisor && (TZ == 1 || TZ == 2 || TZ == 3)) {
13964 DAG.getConstant(Log2_64(Divisor - 1), DL, VT), X);
13970 // 2^n + 2/4/8 + 1 -> (add (shl X, C1), (shXadd X, X))
13971 if (MulAmt > 2 && isPowerOf2_64((MulAmt - 1) & (MulAmt - 2))) {
13972 unsigned ScaleShift = llvm::countr_zero(MulAmt - 1);
13973 if (ScaleShift >= 1 && ScaleShift < 4) {
13974 unsigned ShiftAmt = Log2_64(((MulAmt - 1) & (MulAmt - 2)));
13993 DAG.getConstant(Log2_64(Offset - 1), DL, VT), X);
14034 if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), V1) ||
14035 !ISD::isConstantSplatVector(And.getOperand(1).getNode(), V2) ||
14036 !ISD::isConstantSplatVector(Srl.getOperand(1).getNode(), V3))
14040 if (!V1.isMask(HalfSize) || V2 != (1ULL | 1ULL << HalfSize) ||
14041 V3 != (HalfSize - 1))
14050 DAG.getConstant(HalfSize - 1, DL, HalfVT));
14063 SDValue N1 = N->getOperand(1);
14067 // vmadd: (mul (add x, 1), y) -> (add (mul x, y), y)
14068 // (mul x, add (y, 1)) -> (add x, (mul x, y))
14069 // vnmsub: (mul (sub 1, x), y) -> (sub y, (mul x, y))
14070 // (mul x, (sub 1, y)) -> (sub x, (mul x, y))
14074 SDValue Opnd = V->getOperand(1);
14146 SDValue N1 = N.getOperand(1);
14177 SDValue N1 = N->getOperand(1);
14191 !isa<ConstantSDNode>(N0.getOperand(1)) ||
14192 N0.getConstantOperandVal(1) != UINT64_C(0xffffffff))
14209 // If the constant is larger than 2^32 - 1 it is impossible for both sides
14228 cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
14240 enum ExtKind : uint8_t { ZExt = 1 << 0, SExt = 1 << 1, FPExt = 1 << 2 };
14349 DAG.getUNDEF(NarrowVT), Source.getOperand(1), VL);
14351 Source = Source.getOperand(1);
14503 : OrigOperand.getOperand(1);
14589 SDValue Op = OrigOperand.getOperand(1);
14666 if (OperandIdx == 1) {
15005 NodeExtensionHelper RHS(Root, 1, DAG, Subtarget);
15027 (Attempt != 1 + NodeExtensionHelper::isCommutative(Root)) && !Matched;
15087 SDValue MergeOp = N->getOperand(1);
15093 SDValue X = MergeOp->getOperand(1);
15113 Z = Z.getOperand(1);
15177 DAG.getMergeValues({Res.getValue(1), Res.getValue(2)}, SDLoc(LSNode2));
15186 {LSNode1->getChain(), LSNode1->getOperand(1), LSNode2->getOperand(1),
15209 unsigned OpNum = LSNode1->getOpcode() == ISD::LOAD ? 1 : 2;
15218 if (auto *C1 = dyn_cast<ConstantSDNode>(Ptr->getOperand(1)))
15414 EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
15513 SDValue B = N->getOperand(1 + Offset);
15519 if (V.getOpcode() == RISCVISD::FNEG_VL && V.getOperand(1) == Mask &&
15560 SDValue Op1 = N->getOperand(1);
15575 if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL ||
15576 Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
15611 if (!isa<ConstantSDNode>(N->getOperand(1)))
15613 uint64_t ShAmt = N->getConstantOperandVal(1);
15624 cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32 &&
15626 isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
15627 uint64_t LShAmt = N0.getOperand(0).getConstantOperandVal(1);
15652 AddC = dyn_cast<ConstantSDNode>(N0.getOperand(IsAdd ? 1 : 0));
15665 !isa<ConstantSDNode>(U->getOperand(1)) ||
15666 U->getConstantOperandVal(1) > 32)
15670 Shl = N0.getOperand(IsAdd ? 0 : 1);
15677 if (Shl.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Shl.getOperand(1)) ||
15678 Shl.getConstantOperandVal(1) != 32)
15712 // Invert (and/or (set cc X, Y), (xor Z, 1)) to (or/and (set !cc X, Y), Z) if
15714 // inverting the setcc is free, and Z is 0/1. Caller will invert the
15725 SDValue Xor = Cond.getOperand(1);
15735 // (xor Z, 1) to (not Z).
15736 SDValue Xor1 = Xor.getOperand(1);
15743 // The LHS of the xor needs to be 0/1.
15744 APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
15757 Setcc.getOperand(1), CCVal);
15759 // Invert (setlt 0, X) by converting to (setlt X, 1).
15760 Setcc = DAG.getSetCC(SDLoc(Setcc), VT, Setcc.getOperand(1),
15761 DAG.getConstant(1, SDLoc(Setcc), VT), CCVal);
15762 } else if (CCVal == ISD::SETLT && isOneConstant(Setcc.getOperand(1))) {
15763 // (setlt X, 1) by converting to (setlt 0, X).
15803 RHS = LHS.getOperand(1);
15813 RHS = LHS.getOperand(1);
15818 // Fold ((srl (and X, 1<<C), C), 0, eq/ne) -> ((shl X, XLen-1-C), 0, ge/lt)
15820 LHS.getOperand(1).getOpcode() == ISD::Constant) {
15823 LHS0.getOperand(1).getOpcode() == ISD::Constant) {
15824 uint64_t Mask = LHS0.getConstantOperandVal(1);
15825 uint64_t ShAmt = LHS.getConstantOperandVal(1);
15830 ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
15841 // (X, 1, setne) -> (X, 0, seteq) if we can prove X is 0/1.
15843 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
15894 else if (Commutative && FalseVal == TrueVal.getOperand(1))
15895 OpToFold = 1;
15901 SDValue OtherOp = TrueVal.getOperand(1 - OpToFold);
15929 if (!isNullConstant(Cond->getOperand(1)))
15935 ValOnZero = N->getOperand(1);
15937 CountZeroes = N->getOperand(1);
15970 DAG.getConstant(BitWidth - 1, SDLoc(N), CountZeroes.getValueType());
15980 SDValue True = N->getOperand(1);
15994 SDValue RHS = Cond.getOperand(1);
15997 isa<ConstantSDNode>(LHS.getOperand(1)) && isNullConstant(RHS)) {
15998 const APInt &MaskVal = LHS.getConstantOperandAPInt(1);
16019 SDValue TrueVal = N->getOperand(1);
16039 if (VT.getVectorNumElements() == 1)
16068 // (e.g. x + 0, a * 1 or a << 0), but we then have to keep track
16074 if (!isa<ConstantSDNode>(Op.getOperand(1)) &&
16075 !isa<ConstantFPSDNode>(Op.getOperand(1)))
16079 if (Op.getOperand(0).getValueType() != Op.getOperand(1).getValueType())
16082 RHSOps.push_back(Op.getOperand(1));
16093 SDValue InVal = N->getOperand(1);
16111 SDValue InVecRHS = InVec->getOperand(1);
16113 SDValue InValRHS = InVal->getOperand(1);
16121 if (InVec.getOperand(0).getValueType() != InVec.getOperand(1).getValueType())
16213 return {{P2.getOperand(1), false}};
16215 return {{P1.getOperand(1), true}};
16221 auto BaseDiff = GetPtrDiff(Lds[0], Lds[1]);
16226 for (auto *It = Lds.begin() + 1; It != Lds.end() - 1; It++)
16258 DAG.getConstant(1, DL, MVT::i1));
16263 // total size = (elsize * n) + (stride - elsize) * (n-1)
16264 // = elsize + stride * (n-1)
16266 ConstStride->getSExtValue() * (N->getNumOperands() - 1);
16295 SDValue MulOp = N->getOperand(1);
16342 static_assert(RISCVISD::VWMACC_VL + 1 == RISCVISD::VWMACCU_VL,
16346 static_assert(RISCVISD::VWMUL_VL + 1 == RISCVISD::VWMULU_VL,
16353 SDValue Ops[] = {MulOp.getOperand(0), MulOp.getOperand(1), Addend, AddMask,
16460 uint64_t Last = Index->getConstantOperandVal(i-1);
16467 // trunc (sra sext (X), zext (Y)) -> sra (X, smin (Y, scalarsize(Y) - 1))
16474 SDValue Mask = N->getOperand(1);
16486 V.getOperand(1) == Mask && V.getOperand(2) == VL;
16503 SDValue N1 = Op.getOperand(1);
16515 unsigned MaxShAmt = N10.getValueType().getScalarSizeInBits() - 1;
16533 SDValue Mask = N->getOperand(1);
16543 SDValue Op = V.getOperand(1);
16548 Op.getOperand(1).getValueType().isFixedLengthVector() &&
16549 Op.getOperand(1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
16550 Op.getOperand(1).getOperand(0).getValueType() == Op.getValueType() &&
16551 isNullConstant(Op.getOperand(1).getOperand(1)))
16552 Op = Op.getOperand(1).getOperand(0);
16559 if (auto *Op1 = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
16596 V.getOperand(1), DAG.getUNDEF(V.getValueType()),
16628 Src.getOperand(1) == Mask && Src.getOperand(2) == VL &&
16685 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
16714 SDValue Hi = NewSplitF64.getValue(1);
16733 SimplifyDemandedLowBitsHelper(1, 5))
16862 isTypeLegal(Src.getOperand(1).getValueType())) {
16865 Src.getOperand(0), Src.getOperand(1));
16867 DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
16884 SDValue Cond = N->getOperand(1);
16895 // czero_eqz X, (xor Y, 1) -> czero_nez X, Y if Y is 0 or 1.
16896 // czero_nez X, (xor Y, 1) -> czero_eqz X, Y if Y is 0 or 1.
16897 if (Cond.getOpcode() == ISD::XOR && isOneConstant(Cond.getOperand(1))) {
16899 APInt Mask = APInt::getBitsSetFrom(NewCond.getValueSizeInBits(), 1);
16907 if (Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
16918 SDValue RHS = N->getOperand(1);
16930 // (select (x < 0), y, z) -> x >> (XLEN - 1) & (y - z) + z
16931 // (select (x >= 0), y, z) -> x >> (XLEN - 1) & (z - y) + y
16946 DAG.getConstant(Subtarget.getXLen() - 1, DL, VT));
16962 // (select c, -1, y) -> -c | y
16968 // (select c, y, -1) -> -!c | y
16989 // (riscvisd::select_cc x, 0, ne, x, 1) -> (add x, (setcc x, 0, eq))
16990 // (riscvisd::select_cc x, 0, eq, 1, x) -> (add x, (setcc x, 0, eq))
17003 // If both true/false are an xor with 1, pull through the select.
17008 TrueV.getOperand(1) == FalseV.getOperand(1) &&
17009 isOneConstant(TrueV.getOperand(1)) &&
17013 return DAG.getNode(ISD::XOR, DL, VT, NewSel, TrueV.getOperand(1));
17019 SDValue LHS = N->getOperand(1);
17045 // TRUNC=1.
17046 SDValue In2 = N->getOperand(1);
17052 (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
17092 SimpleVID && SimpleVID->StepDenominator == 1) {
17114 return DAG.getMergeValues({VPSelect, SDValue(StridedLoad.getNode(), 1)},
17129 return DAG.getMergeValues({Shuffle, Load.getValue(1)}, DL);
17151 SDValue Mask = DAG.getSplat(MaskVT, DL, DAG.getConstant(1, DL, MVT::i1));
17159 return DAG.getMergeValues({Result, Gather.getValue(1)}, DL);
17254 SDValue ShAmt = N->getOperand(1);
17261 ShAmt.getOperand(1), VL);
17277 SDValue ShAmt = N->getOperand(1);
17283 ShAmt.getOperand(1),
17381 L->hasNUsesOfValue(1, 0) && L->hasNUsesOfValue(1, 1) &&
17382 Store->getChain() == SDValue(L, 1) && ISD::isNormalLoad(L) &&
17398 // Combine store of vmv.x.s/vfmv.f.s to vse with VL of 1.
17404 isNullConstant(Val.getOperand(1)))) {
17414 DAG.getConstant(1, DL, MaskVT),
17415 DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
17448 SDValue Scalar = N->getOperand(1);
17451 // If VL is 1, we can use vfmv.s.f.
17459 SDValue Scalar = N->getOperand(1);
17467 if (SimplifyDemandedLowBitsHelper(1, EltWidth))
17470 // If VL is 1 and the scalar value won't benefit from immediate, we can
17481 SDValue Src = N->getOperand(1);
17487 isNullConstant(Src.getOperand(1)) &&
17502 SDValue Scalar = N->getOperand(1);
17546 unsigned IntOpNo = N->getOpcode() == ISD::INTRINSIC_WO_CHAIN ? 0 : 1;
17599 // If VL is 0, vcpop -> li 0, vfirst -> li -1.
17604 return DAG.getConstant(-1, DL, VT);
17668 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
17669 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
17720 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
17797 unsigned Shift = 1 << Stage;
17828 Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
17832 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
17840 Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
17847 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
17848 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
17857 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
17858 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
17867 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
17868 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
17875 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
17882 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
17892 Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
17907 Known.Zero.setBitsFrom(Log2_32(MaxVLenB)+1);
17920 Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
17928 unsigned VSEW = Op.getConstantOperandVal(HasAVL + 1);
17937 if (HasAVL && isa<ConstantSDNode>(Op.getOperand(1)))
17938 MaxVL = std::min(MaxVL, Op.getConstantOperandVal(1));
17940 unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
17959 DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
17960 if (Tmp == 1) return 1; // Early out.
17962 DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
17969 return DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
17974 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
17975 if (Tmp < 33) return 1;
18003 return XLen - EltBits + 1;
18007 unsigned IntNo = Op.getConstantOperandVal(1);
18034 return 1;
18086 auto *CNodeLo = GetSupportedConstantPool(Ptr.getOperand(1));
18135 Register HiReg = MI.getOperand(1).getReg();
18174 Register HiReg = MI.getOperand(1).getReg();
18210 Register LoReg = MI.getOperand(1).getReg();
18222 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
18258 Register Src1Reg = MI.getOperand(1).getReg();
18279 .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
18356 Register FLHS = First.getOperand(1).getReg();
18364 Register SLHS = Second.getOperand(1).getReg();
18433 Register LHS = MI.getOperand(1).getReg();
18449 if (SequenceMBBI->getOperand(1).getReg() != LHS ||
18575 .add(MI.getOperand(1))
18592 lookupMaskedIntrinsic(RISCV::VFCVT_F_X_V, LMul, 1 << Log2SEW)
18597 .add(MI.getOperand(1))
18697 Register SrcReg = MI.getOperand(1).getReg();
19014 if (!LocVT.isVector() && IsRet && ValNo > 1)
19080 if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
19479 // FastCC has less than 1% performance improvement for some particular
19739 while (i + 1 != e && Ins[InsIdx + 1].OrigArgIndex == ArgIndex) {
19740 CCValAssign &PartVA = ArgLocs[i + 1];
19741 unsigned PartOffset = Ins[InsIdx + 1].PartOffset - ArgPartOffset;
19979 SDValue Hi = SplitF64.getValue(1);
20025 while (i + 1 != e && Outs[OutIdx + 1].OrigArgIndex == ArgIndex) {
20026 SDValue PartValue = OutVals[OutIdx + 1];
20027 unsigned PartOffset = Outs[OutIdx + 1].PartOffset - ArgPartOffset;
20091 Glue = Chain.getValue(1);
20155 Glue = Chain.getValue(1);
20159 Glue = Chain.getValue(1);
20173 Chain = RetValue.getValue(1);
20180 Chain = RetValue2.getValue(1);
20237 SmallVector<SDValue, 4> RetOps(1, Chain);
20252 SDValue Hi = SplitF64.getValue(1);
20263 Glue = Chain.getValue(1);
20266 Glue = Chain.getValue(1);
20279 Glue = Chain.getValue(1);
20332 if (N->getNumValues() != 1)
20334 if (!N->hasNUsesOfValue(1, 0))
20351 if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
20640 if (Constraint.size() == 1) {
20669 if (Constraint.size() == 1) {
20768 .Cases("{f14}", "{fa4}", RISCV::F14_F)
20870 // Currently only support length 1 constraints.
20871 if (ConstraintCode.size() == 1) {
20886 // Currently only support length 1 constraints.
20887 if (Constraint.size() == 1) {
21039 // In the case of an atomicrmw xchg with a constant 0/-1 operand, replace
21200 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
21209 if (isInt<5>(RHSC >> i) && ((RHSC % (1LL << i)) == 0)) {
21217 Offset = Op->getOperand(1);
21261 Offset = Op->getOperand(1);
21262 else if (Base == Op->getOperand(1))
21362 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
21363 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
21377 if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
21378 (1 - ImmS).isPowerOf2())
21398 ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
21422 *Fast = 1;
21446 // operations to correspond roughly to that threshold. LMUL>1 operations
21506 // Give an example here, we want to copy a <vscale x 1 x i8> value to
21508 // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
21560 // Give an example here, we want to copy a <vscale x 1 x i8> value from
21563 // then we can extract <vscale x 1 x i8>.
21688 /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements
21694 /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1
21735 /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
21739 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
21771 SVI->getOperand(0), SVI->getOperand(1),
21876 Builder.CreateCall(VssegNFunc, {II->getOperand(0), II->getOperand(1),
21926 // 1, the default value, works as __RISCV_NTLH_ALL
21941 assert((1 <= NontemporalLevel && NontemporalLevel <= 5) &&
21980 return isCtpopFast(VT) ? 0 : 1;
22025 // Ensure 2**k-1 < 2048 so that we can just emit a single addi/addiw.
22058 // Return if this argument type contains only 1 element, or it's not a
22089 RVVArgInfos.push_back({1, RegisterVT, true});
22094 RVVArgInfos.push_back({1, RegisterVT, false});
22140 RVVArgInfos.push_back({1, RegisterVT, true});
22145 RVVArgInfos.insert(RVVArgInfos.end(), NumRegs, {1, RegisterVT, false});
22159 case 1:
22197 uint32_t Map = ((1 << TotalRegsNeeded) - 1) << StartReg;