Lines Matching +full:0 +full:x12340000
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
291 switch (Op.getConstantOperandVal(0)) { in isZeroingInactiveLanes()
352 Disc->getConstantOperandVal(0) == Intrinsic::ptrauth_blend) { in extractPtrauthBlendDiscriminators()
364 return std::make_tuple(DAG->getTargetConstant(0, DL, MVT::i64), Disc); in extractPtrauthBlendDiscriminators()
1395 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) in AArch64TargetLowering()
1819 for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) { in AArch64TargetLowering()
1822 if ((libcallName != nullptr) && (libcallName[0] != '#')) { in AArch64TargetLowering()
1989 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) in addTypeForFixedLengthSVE()
2176 if (Imm == 0 || Imm == Mask || in optimizeLogicalImm()
2188 // the number of transitions between 0 and 1. In order to achieve this goal, in optimizeLogicalImm()
2190 // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a in optimizeLogicalImm()
2192 // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'. in optimizeLogicalImm()
2193 // The final result is 0b11000011. in optimizeLogicalImm()
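
The comment around lines 2188-2193 is the heart of optimizeLogicalImm: AArch64 logical immediates are rotated (and replicated) runs of ones, so filling non-demanded bits in a way that minimizes 0/1 transitions maximizes the chance the result is encodable. A minimal standalone sketch of that encodability property, using a hypothetical 8-bit checker rather than LLVM's real encoder:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical 8-bit stand-in for the real encodability test: a value is
    // a "rotated run of ones" iff some rotation of it has the form 2^k - 1.
    static bool isRotatedRunOfOnes(uint8_t V) {
      if (V == 0 || V == 0xFF)
        return false; // degenerate cases are handled separately in the backend
      for (int R = 0; R < 8; ++R) {
        uint8_t Rot = uint8_t((V >> R) | (V << (8 - R)));
        if ((Rot & (Rot + 1)) == 0) // Rot looks like 0b00..011..1
          return true;
      }
      return false;
    }

    int main() {
      printf("%d\n", isRotatedRunOfOnes(0b11000011)); // 1: the filled example above
      printf("%d\n", isRotatedRunOfOnes(0b01010101)); // 0: too many 0/1 transitions
    }
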
2219 if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0) in optimizeLogicalImm()
2236 assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 && in optimizeLogicalImm()
2247 if (NewImm == 0 || NewImm == OrigMask) { in optimizeLogicalImm()
2248 New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0), in optimizeLogicalImm()
2256 TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0); in optimizeLogicalImm()
2314 SDValue SrcOp = Op.getOperand(0); in computeKnownBitsForTargetNode()
2325 Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); in computeKnownBitsForTargetNode()
2334 Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); in computeKnownBitsForTargetNode()
2340 Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); in computeKnownBitsForTargetNode()
2347 Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); in computeKnownBitsForTargetNode()
2354 Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); in computeKnownBitsForTargetNode()
2361 APInt(Known.getBitWidth(), Op->getConstantOperandVal(0))); in computeKnownBitsForTargetNode()
2373 Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1); in computeKnownBitsForTargetNode()
2374 Known.Zero |= APInt(Known.getBitWidth(), 0xFE); in computeKnownBitsForTargetNode()
2395 unsigned IntNo = Op.getConstantOperandVal(0); in computeKnownBitsForTargetNode()
2459 // Compares return either 0 or all-ones in ComputeNumSignBitsForTargetNode()
2883 Register DestReg = MI.getOperand(0).getReg(); in EmitF128CSEL()
2938 Register TargetReg = MI.getOperand(0).getReg(); in EmitDynamicProbedAlloc()
2953 MIB.addReg(BaseReg + MI.getOperand(0).getImm(), RegState::Define); in EmitTileLoad()
2971 MIB.add(MI.getOperand(0)); // Vector select register in EmitFill()
2988 .addReg(MI.getOperand(0).getReg(), Op0IsDef ? RegState::Define : 0); in EmitZTInstr()
3002 unsigned StartIdx = 0; in EmitZAInstr()
3005 bool HasZPROut = HasTile && MI.getOperand(0).isReg(); in EmitZAInstr()
3017 if (MI.getOperand(0).isReg() && !MI.getOperand(1).isImm()) { in EmitZAInstr()
3035 MIB.add(MI.getOperand(0)); // Mask in EmitZero()
3037 unsigned Mask = MI.getOperand(0).getImm(); in EmitZero()
3038 for (unsigned I = 0; I < 8; I++) { in EmitZero()
3054 if (TPIDR2.Uses > 0) { in EmitInitTPIDR2Object()
3058 .addReg(MI.getOperand(0).getReg()) in EmitInitTPIDR2Object()
3060 .addImm(0); in EmitInitTPIDR2Object()
3092 if (TPIDR2.Uses > 0) { in EmitAllocateZABuffer()
3104 auto Dest = MI.getOperand(0).getReg(); in EmitAllocateZABuffer()
3238 N = N->getOperand(0).getNode(); in isZerosVector()
3246 auto Opnd0 = N->getOperand(0); in isZerosVector()
3406 bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0); in isLegalArithImmed()
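
Line 3406 is the whole test for add/sub-class immediates: a 12-bit value, optionally shifted left by 12. A self-contained restatement with a few probe values (the helper name here is ours, not LLVM's):

    #include <cstdint>
    #include <cstdio>

    static bool legalArithImmed(uint64_t C) {
      return (C >> 12) == 0 || ((C & 0xFFFULL) == 0 && (C >> 24) == 0);
    }

    int main() {
      printf("%d\n", legalArithImmed(0xFFF));    // 1: plain 12-bit immediate
      printf("%d\n", legalArithImmed(0x123000)); // 1: 12-bit immediate, LSL #12
      printf("%d\n", legalArithImmed(0x1001));   // 0: needs bits in both halves
    }
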
3417 // Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
3422 // comparisons are only valid if op2 != 0.
3428 return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) && in isCMN()
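
Why line 3422 insists on op2 != 0: CMP a, -b and CMN a, b both compute a + b, but the carry-out disagrees exactly when b is zero. A small demonstration with 8-bit arithmetic (plain C++, our illustration rather than LLVM code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t A = 5;
      for (uint8_t B : {uint8_t(3), uint8_t(0)}) {
        uint8_t NegB = uint8_t(-B);
        bool CmpCarry = A >= NegB;                        // SUBS carry = "no borrow"
        bool CmnCarry = unsigned(A) + unsigned(B) > 0xFF; // ADDS carry-out
        printf("B=%u: cmp-carry=%d cmn-carry=%d\n", B, CmpCarry, CmnCarry);
      }
      // B=3: the flags agree; B=0: CMP A, #0 always sets carry while
      // CMN A, #0 never does, so unsigned conditions would be miscompiled.
    }
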
3477 // Can we combine a (CMP op1, (sub 0, op2)) into a CMN instruction? in emitComparison()
3480 } else if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) && in emitComparison()
3483 // we combine a (CMP (sub 0, op1), op2) into a CMN instruction? in emitComparison()
3488 // Similarly, (CMP (and X, Y), 0) can be implemented with a TST in emitComparison()
3493 LHS.getOperand(0), in emitComparison()
3514 /// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))"
3566 unsigned Opcode = 0; in emitConditionalComparison()
3581 RHS = DAG.getConstant(Imm.abs(), DL, Const->getValueType(0)); in emitConditionalComparison()
3586 } else if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) && in emitConditionalComparison()
3589 // we combine a (CCMP (sub 0, op1), op2) into a CCMN instruction? in emitConditionalComparison()
3593 if (Opcode == 0) in emitConditionalComparison()
3619 unsigned Depth = 0) { in canEmitConjunction()
3624 if (Val->getOperand(0).getValueType() == MVT::f128) in canEmitConjunction()
3635 SDValue O0 = Val->getOperand(0); in canEmitConjunction()
3685 SDValue LHS = Val->getOperand(0); in emitConjunctionRec()
3724 SDValue LHS = Val->getOperand(0); in emitConjunctionRec()
3813 return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF); in getCmpOperandFoldingProfit()
3820 return 0; in getCmpOperandFoldingProfit()
3829 if (isSupportedExtend(Op.getOperand(0))) in getCmpOperandFoldingProfit()
3836 return 0; in getCmpOperandFoldingProfit()
3852 if ((VT == MVT::i32 && C != 0x80000000 && in getAArch64Cmp()
3854 (VT == MVT::i64 && C != 0x80000000ULL && in getAArch64Cmp()
3863 if ((VT == MVT::i32 && C != 0 && in getAArch64Cmp()
3865 (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) { in getAArch64Cmp()
3914 if (getCmpOperandFoldingProfit(TheLHS) + (LHSIsCMN ? 1 : 0) > in getAArch64Cmp()
3915 getCmpOperandFoldingProfit(TheRHS) + (RHSIsCMN ? 1 : 0)) { in getAArch64Cmp()
3926 // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095. in getAArch64Cmp()
3933 // ldrh w0, [x0, #0] in getAArch64Cmp()
3936 // ldrsh w0, [x0, #0] in getAArch64Cmp()
3942 if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) && in getAArch64Cmp()
3945 LHS.getNode()->hasNUsesOfValue(1, 0)) { in getAArch64Cmp()
3947 if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) { in getAArch64Cmp()
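
Lines 3926-3947 implement the load-narrowing trick sketched in the comment: comparing a zero-extended halfword load against 65535 needs an unencodable immediate, but the same comparison on a sign-extended load is CMN w0, #1. The equivalence, checked against the immediate test from line 3406 (helper name is ours; two's-complement narrowing assumed, guaranteed since C++20):

    #include <cstdint>
    #include <cassert>

    static bool legalArithImmed(uint64_t C) { // same predicate as line 3406
      return (C >> 12) == 0 || ((C & 0xFFFULL) == 0 && (C >> 24) == 0);
    }

    int main() {
      assert(!legalArithImmed(0xFFFF)); // zext path: CMP w0, #0xFFFF is illegal
      assert(legalArithImmed(1));       // sext path: CMN w0, #1 is fine
      // The rewrite is sound because the predicates agree bit-for-bit:
      for (uint32_t X : {0xFFFFu, 0x1234u, 0u})
        assert(((X & 0xFFFF) == 0xFFFF) == (int16_t(X & 0xFFFF) == -1));
    }
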
3980 SDValue LHS = Op.getOperand(0); in getAArch64XALUOOp()
3982 unsigned Opc = 0; in getAArch64XALUOOp()
4023 // tst xreg, #0xffffffff00000000 in getAArch64XALUOOp()
4024 SDValue UpperBits = DAG.getConstant(0xFFFFFFFF00000000, DL, MVT::i64); in getAArch64XALUOOp()
4047 DAG.getConstant(0, DL, MVT::i64), in getAArch64XALUOOp()
4055 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32); in getAArch64XALUOOp()
4069 SDValue Sel = Op.getOperand(0); in LowerXOR()
4077 // (csel 1, 0, invert(cc), overflow_op_bool) in LowerXOR()
4082 if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0))) in LowerXOR()
4086 SDValue FVal = DAG.getConstant(0, dl, MVT::i32); in LowerXOR()
4089 std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG); in LowerXOR()
4101 // (xor x, (select_cc a, b, cc, 0, -1)) in LowerXOR()
4108 SDValue LHS = Sel.getOperand(0); in LowerXOR()
4148 // If Invert is false, sets 'C' bit of NZCV to 0 if value is 0, else sets 'C'
4149 // bit to 1. If Invert is true, sets 'C' bit of NZCV to 1 if value is 0, else
4150 // sets 'C' bit to 0.
4154 SDValue Op0 = Invert ? DAG.getConstant(0, DL, VT) : Value; in valueToCarryFlag()
4161 // If Invert is false, value is 1 if 'C' bit of NZCV is 1, else 0.
4162 // If Invert is true, value is 0 if 'C' bit of NZCV is 1, else 1.
4167 SDValue Zero = DAG.getConstant(0, DL, VT); in carryFlagToValue()
4174 // Value is 1 if 'V' bit of NZCV is 1, else 0
4178 SDValue Zero = DAG.getConstant(0, DL, VT); in overflowFlagToValue()
4188 EVT VT0 = Op.getValue(0).getValueType(); in lowerADDSUBO_CARRY()
4195 SDValue OpLHS = Op.getOperand(0); in lowerADDSUBO_CARRY()
4223 // We use 0 and 1 as false and true values. in LowerXALUO()
4225 SDValue FVal = DAG.getConstant(0, dl, MVT::i32); in LowerXALUO()
4241 // 3: int locality (0 = no locality ... 3 = extreme locality)
4256 // The encoding starts at 0 for level 1 in LowerPREFETCH()
4265 return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0), in LowerPREFETCH()
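
The two comments above describe opposite numberings: llvm.prefetch locality runs 0 (none) to 3 (extreme), while the ISA's PRFM target levels start at 0 for L1. A sketch of the mapping we read off these lines (a simplification of what LowerPREFETCH encodes, under our reading of the listing):

    #include <cstdio>

    int main() {
      for (unsigned Locality = 0; Locality <= 3; ++Locality) {
        bool IsStream = Locality == 0;                // no locality => streaming
        unsigned Level = IsStream ? 0 : 3 - Locality; // invert: hotter => nearer
        printf("locality %u -> %s, L%u\n", Locality, IsStream ? "STRM" : "KEEP",
               Level + 1); // e.g. locality 3 -> KEEP, L1
      }
    }
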
4290 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); in LowerFP_ROUND()
4313 DAG.getConstant(0x400000, dl, I32)); in LowerFP_ROUND()
4327 DAG.getNode(ISD::ADD, dl, I32, DAG.getConstant(0x7fff, dl, I32), Lsb); in LowerFP_ROUND()
4331 // Don't round if we had a NaN; we don't want to turn 0x7fffffff into in LowerFP_ROUND()
4332 // 0x80000000. in LowerFP_ROUND()
4350 return IsStrict ? DAG.getMergeValues({Result, Op.getOperand(0)}, dl) in LowerFP_ROUND()
4372 EVT InVT = Op.getOperand(IsStrict ? 1 : 0).getValueType(); in LowerVectorFP_TO_INT()
4395 {Op.getOperand(0), Op.getOperand(1)}); in LowerVectorFP_TO_INT()
4397 {Ext.getValue(1), Ext.getValue(0)}); in LowerVectorFP_TO_INT()
4401 DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0))); in LowerVectorFP_TO_INT()
4411 {Op.getOperand(0), Op.getOperand(1)}); in LowerVectorFP_TO_INT()
4417 Op.getOperand(0)); in LowerVectorFP_TO_INT()
4428 {Op.getOperand(0), Op.getOperand(1)}); in LowerVectorFP_TO_INT()
4430 {Ext.getValue(1), Ext.getValue(0)}); in LowerVectorFP_TO_INT()
4432 SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0)); in LowerVectorFP_TO_INT()
4442 Op.getOperand(IsStrict ? 1 : 0), DAG.getConstant(0, dl, MVT::i64)); in LowerVectorFP_TO_INT()
4446 {Op.getOperand(0), Extract}); in LowerVectorFP_TO_INT()
4457 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); in LowerFP_TO_INT()
4469 {Op.getOperand(0), SrcVal}); in LowerFP_TO_INT()
4471 {Ext.getValue(1), Ext.getValue(0)}); in LowerFP_TO_INT()
4491 SDValue SrcVal = Op.getOperand(0); in LowerVectorFP_TO_INT_SAT()
4569 SDValue SrcVal = Op.getOperand(0); in LowerFP_TO_INT_SAT()
4625 SDValue Src = Op.getOperand(0); in LowerVectorXRINT()
4650 SDValue In = Op.getOperand(IsStrict ? 1 : 0); in LowerVectorINT_TO_FP()
4678 {Op.getOperand(0), In}); in LowerVectorINT_TO_FP()
4681 {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)}); in LowerVectorINT_TO_FP()
4685 DAG.getIntPtrConstant(0, dl)); in LowerVectorINT_TO_FP()
4696 {Op.getOperand(0), In}); in LowerVectorINT_TO_FP()
4699 {In.getValue(1), In.getValue(0), DAG.getIntPtrConstant(0, dl)}); in LowerVectorINT_TO_FP()
4703 DAG.getIntPtrConstant(0, dl, /*isTarget=*/true)); in LowerVectorINT_TO_FP()
4711 return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op.getOperand(0), In}); in LowerVectorINT_TO_FP()
4720 In, DAG.getConstant(0, dl, MVT::i64)); in LowerVectorINT_TO_FP()
4724 {Op.getOperand(0), Extract}); in LowerVectorINT_TO_FP()
4737 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0); in LowerINT_TO_FP()
4746 {Op.getOperand(0), SrcVal}); in LowerINT_TO_FP()
4749 {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)}); in LowerINT_TO_FP()
4753 DAG.getIntPtrConstant(0, dl)); in LowerINT_TO_FP()
4779 // uint64_t SrcHi = SrcVal & ~0xfffull; in LowerINT_TO_FP()
4780 // uint64_t SrcLo = SrcVal & 0xfffull; in LowerINT_TO_FP()
4782 // bool HasHighest = Highest != 0; in LowerINT_TO_FP()
4786 // uint64_t HasLo = SrcLo != 0; in LowerINT_TO_FP()
4808 DAG.getConstant(~0xfffull, DL, MVT::i64)); in LowerINT_TO_FP()
4810 DAG.getConstant(0xfffull, DL, MVT::i64)); in LowerINT_TO_FP()
4814 SDValue Zero64 = DAG.getConstant(0, DL, MVT::i64); in LowerINT_TO_FP()
4819 {Op.getOperand(0), ToRound}) in LowerINT_TO_FP()
4848 DAG.getIntPtrConstant(0, DL)}) in LowerINT_TO_FP()
4850 DAG.getIntPtrConstant(0, DL, true)); in LowerINT_TO_FP()
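
The commented pseudocode at lines 4779-4786 exists because the obvious u64 -> f64 -> f32 route rounds twice, which can differ from the correctly rounded single conversion by one ULP. A minimal reproducer of that double-rounding hazard (standard C++ on the usual round-to-nearest targets, independent of this backend):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 2^53 + 2^29 + 1: the trailing 1 is lost going to f64, which then flips
      // how the exact f32 halfway case 2^53 + 2^29 resolves under ties-to-even.
      uint64_t V = (1ULL << 53) + (1ULL << 29) + 1;
      float Direct  = float(V);         // one correctly rounded step
      float TwoStep = float(double(V)); // rounds twice
      printf("%a vs %a\n", Direct, TwoStep); // differ by one f32 ULP
    }
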
4875 SDValue Arg = Op.getOperand(0); in LowerFSINCOS()
4909 EVT ArgVT = Op.getOperand(0).getValueType(); in LowerBITCAST()
4928 Op.getOperand(0)); in LowerBITCAST()
4931 return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG); in LowerBITCAST()
4944 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0)); in LowerBITCAST()
4991 EVT VT = OpNode->getOperand(0).getValueType(); in getConstantLaneNumOfExtractHalfOperand()
5039 return addRequiredExtensionForVectorMULL(N.getOperand(0), DAG, in skipExtensionForVectorMULL()
5040 N.getOperand(0).getValueType(), VT, in skipExtensionForVectorMULL()
5046 for (unsigned i = 0; i != NumElts; ++i) { in skipExtensionForVectorMULL()
5070 SDValue N0 = N.getOperand(0); in isAddSubSExt()
5081 SDValue N0 = N.getOperand(0); in isAddSubZExt()
5092 // The mapping from the ARM rounding-mode value to FLT_ROUNDS is 0->1, 1->2, 2->3, 3->0 in LowerGET_ROUNDING()
5097 SDValue Chain = Op.getOperand(0); in LowerGET_ROUNDING()
5115 SDValue Chain = Op->getOperand(0); in LowerSET_ROUNDING()
5120 // is 0->3, 1->0, 2->1, 3->2. The formula we use to implement this is in LowerSET_ROUNDING()
5123 // The argument of llvm.set.rounding must be within the range [0, 3], so in LowerSET_ROUNDING()
5131 DAG.getConstant(0x3, DL, MVT::i32)); in LowerSET_ROUNDING()
5143 FPCR = FPCR.getValue(0); in LowerSET_ROUNDING()
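
The two mappings quoted above are inverse rotations mod 4, which is why each direction is a single add-and-mask:

    #include <cstdio>

    int main() {
      for (unsigned RMode = 0; RMode < 4; ++RMode) {
        unsigned FltRounds = (RMode + 1) & 3;     // FPCR RM field -> FLT_ROUNDS
        unsigned Back      = (FltRounds + 3) & 3; // FLT_ROUNDS -> FPCR RM field
        printf("%u -> %u -> %u\n", RMode, FltRounds, Back);
      }
      // Prints 0->1->0, 1->2->1, 2->3->2, 3->0->3, matching both comments.
    }
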
5159 SDValue Chain = Op->getOperand(0); in LowerGET_FPMODE()
5167 FPCR = FPCR.getValue(0); in LowerGET_FPMODE()
5178 SDValue Chain = Op->getOperand(0); in LowerSET_FPMODE()
5193 SDValue Chain = Op->getOperand(0); in LowerRESET_FPMODE()
5201 FPCR = FPCR.getValue(0); in LowerRESET_FPMODE()
5234 ZextOperand = N0.getOperand(0); in selectUmullSmull()
5236 ZextOperand = N1.getOperand(0); in selectUmullSmull()
5258 return 0; in selectUmullSmull()
5275 return 0; in selectUmullSmull()
5289 SDValue N0 = Op.getOperand(0); in LowerMUL()
5298 N0 = N0.getOperand(0); in LowerMUL()
5299 N1 = N1.getOperand(0); in LowerMUL()
5339 DAG.getConstant(0, DL, MVT::i64)); in LowerMUL()
5344 SDValue N00 = skipExtensionForVectorMULL(N0.getOperand(0), DAG); in LowerMUL()
5354 DAG.getConstant(0, DL, MVT::i64)); in LowerMUL()
5454 return DAG.getNode(ISD::AND, DL, MVT::i64, CallResult.first.getOperand(0), in getRuntimePStateSM()
5469 // ldr [%tileslice2, 0], [%ptr2, 0]
5501 int32_t ConstAddend = 0; in LowerSMELdrStr()
5508 VarAddend = VecNum.getOperand(0); in LowerSMELdrStr()
5538 {/*Chain=*/N.getOperand(0), TileSlice, Base, in LowerSMELdrStr()
5550 SDValue Chain = Op.getOperand(0); in LowerINTRINSIC_VOID()
5572 Op->getOperand(0), // Chain in LowerINTRINSIC_VOID()
5578 Op->getOperand(0), // Chain in LowerINTRINSIC_VOID()
5612 return DAG.getMergeValues({MS.getValue(0), MS.getValue(2)}, DL); in LowerINTRINSIC_W_CHAIN()
5619 unsigned IntNo = Op.getConstantOperandVal(0); in LowerINTRINSIC_WO_CHAIN()
5649 assert((!LHSLane || *LHSLane < 2) && "Expect lane to be None or 0 or 1"); in LowerINTRINSIC_WO_CHAIN()
5650 assert((!RHSLane || *RHSLane < 2) && "Expect lane to be None or 0 or 1"); in LowerINTRINSIC_WO_CHAIN()
5665 N.getOperand(0), DAG.getConstant(1, dl, MVT::i64)); in LowerINTRINSIC_WO_CHAIN()
5670 // extract_high_v2i64(duplane(<2 x Ty>, 0)). This saves a roundtrip to in LowerINTRINSIC_WO_CHAIN()
5672 // 1 to lane 0) is like this: in LowerINTRINSIC_WO_CHAIN()
5675 if (NLane && *NLane == 0) in LowerINTRINSIC_WO_CHAIN()
5678 N.getOperand(0), in LowerINTRINSIC_WO_CHAIN()
5679 DAG.getConstant(0, dl, MVT::i64)), in LowerINTRINSIC_WO_CHAIN()
5717 Op.getOperand(0), in LowerINTRINSIC_WO_CHAIN()
6048 DAG.getVectorIdxConstant(0, dl)); in LowerINTRINSIC_WO_CHAIN()
6060 DAG.getConstant(0, dl, MVT::i64)); in LowerINTRINSIC_WO_CHAIN()
6096 const EVT IndexVT = Extend.getOperand(0).getValueType(); in shouldRemoveExtendFromGSIndex()
6120 if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal->getOperand(0))) { in isVectorLoadExtDesirable()
6121 if (!isLoadExtLegalOrCustom(ISD::ZEXTLOAD, ExtVT, Ld->getValueType(0))) { in isVectorLoadExtDesirable()
6127 unsigned NumExtMaskedLoads = 0; in isVectorLoadExtDesirable()
6263 : DAG.getConstant(0, DL, ContainerVT); in LowerMGATHER()
6369 EVT VT = Op->getValueType(0); in LowerMLOAD()
6417 Trunc, DAG.getConstant(0, DL, MVT::i64)); in LowerTruncateVectorStore()
6469 StoreNode->getValue(), DAG.getConstant(0, Dl, MVT::i64)); in LowerSTORE()
6485 assert(Value->getValueType(0) == MVT::i64x8); in LowerSTORE()
6489 for (unsigned i = 0; i < 8; i++) { in LowerSTORE()
6546 for (unsigned i = 0; i < 8; i++) { in LowerLOAD()
6560 EVT VT = Op->getValueType(0); in LowerLOAD()
6586 DAG.getConstant(0, DL, MVT::i64)); in LowerLOAD()
6600 SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), in LowerABS()
6601 Op.getOperand(0)); in LowerABS()
6605 Op.getOperand(0), DAG.getConstant(0, DL, VT)); in LowerABS()
6606 return DAG.getNode(AArch64ISD::CSEL, DL, VT, Op.getOperand(0), Neg, in LowerABS()
6612 SDValue Chain = Op.getOperand(0); in LowerBRCOND()
6641 ISD::FSHR, DL, VT, Op.getOperand(0), Op.getOperand(1), in LowerFunnelShift()
6652 SDValue X = Op.getOperand(0); in LowerFLDEXP()
6676 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in LowerFLDEXP()
6702 return Op.getOperand(0); in LowerADJUST_TRAMPOLINE()
6712 SDValue Chain = Op.getOperand(0); in LowerINIT_TRAMPOLINE()
7045 assert((Op.getOperand(0).getValueType() == MVT::f16 || in LowerOperation()
7046 Op.getOperand(0).getValueType() == MVT::bf16) && in LowerOperation()
7049 SDValue Ext = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0)); in LowerOperation()
7061 {Op.getOperand(0), Op.getOperand(1)}); in LowerOperation()
7063 {Ext.getValue(1), Ext.getValue(0)}); in LowerOperation()
7070 SDValue Chain = Op.getOperand(0); in LowerOperation()
7151 unsigned IID = N->getConstantOperandVal(0); in getIntrinsicID()
7288 unsigned CurArgIdx = 0; in LowerFormalArguments()
7289 for (unsigned i = 0; i != NumArgs; ++i) { in LowerFormalArguments()
7322 unsigned ExtraArgLocs = 0; in LowerFormalArguments()
7323 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { in LowerFormalArguments()
7388 // t(n+1): ch, glue = SMSTART t0:0, ..., tn:2 in LowerFormalArguments()
7436 uint32_t BEAlign = 0; in LowerFormalArguments()
7513 while (NumParts > 0) { in LowerFormalArguments()
7517 if (NumParts > 0) { in LowerFormalArguments()
7576 for (unsigned I=0; I<InVals.size(); ++I) { in LowerFormalArguments()
7626 for (unsigned I = 0, E = Ins.size(); I != E; ++I) { in LowerFormalArguments()
7688 {/*Chain*/ Buffer.getValue(1), /*Buffer ptr*/ Buffer.getValue(0)}); in LowerFormalArguments()
7732 int GPRIdx = 0; in saveVarArgRegisters()
7733 if (GPRSaveSize != 0) { in saveVarArgRegisters()
7778 int FPRIdx = 0; in saveVarArgRegisters()
7779 if (FPRSaveSize != 0) { in saveVarArgRegisters()
7813 for (unsigned i = 0; i != RVLocs.size(); ++i) { in LowerCallResult()
7818 if (i == 0 && isThisReturn) { in LowerCallResult()
7920 for (unsigned i = 0; i != NumArgs; ++i) { in analyzeCallOperands()
8128 if (FI->getIndex() < 0) { in addTokenForArgument()
8154 APInt RequiredZero(SizeInBits, 0xFE); in checkZExtBool()
8168 for (unsigned I = MI.getNumOperands() - 1; I > 0; --I) in AdjustInstrPostInstrSelection()
8176 if (MI.getOperand(0).getImm() == AArch64SVCR::SVCRSM || in AdjustInstrPostInstrSelection()
8177 MI.getOperand(0).getImm() == AArch64SVCR::SVCRSMZA) { in AdjustInstrPostInstrSelection()
8281 for (unsigned i = 0; i != NumArgs; ++i) { in LowerCall()
8334 NumBytes = 0; in LowerCall()
8339 // by this amount for a tail call. In a sibling call it must be 0 because the in LowerCall()
8341 // arguments to begin at SP+0. Completely unused for non-tail calls. in LowerCall()
8342 int FPDiff = 0; in LowerCall()
8358 if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff) in LowerCall()
8366 assert(FPDiff % 16 == 0 && "unaligned stack on tail call"); in LowerCall()
8424 PStateSM = DAG.getConstant(0, DL, MVT::i64); in LowerCall()
8451 {Chain, DAG.getConstant(0, DL, MVT::i32), ZTFrameIdx}); in LowerCall()
8469 Chain = DAG.getCALLSEQ_START(Chain, IsTailCall ? 0 : NumBytes, 0, DL); in LowerCall()
8488 unsigned ExtraArgLocs = 0; in LowerCall()
8489 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { in LowerCall()
8576 if (NumParts > 0) { in LowerCall()
8603 if (i == 0 && Flags.isReturned() && !Flags.isSwiftSelf() && in LowerCall()
8604 Outs[0].VT == MVT::i64) { in LowerCall()
8607 assert(!Ins.empty() && Ins[0].VT == MVT::i64 && in LowerCall()
8651 uint32_t BEAlign = 0; in LowerCall()
8744 Chain = NewChain.getValue(0); in LowerCall()
8764 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); in LowerCall()
8768 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); in LowerCall()
8779 Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0); in LowerCall()
8788 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL); in LowerCall()
8899 DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16) : 0; in LowerCall()
8908 IsThisReturn ? OutVals[0] : SDValue(), RequiresSMChange); in LowerCall()
8937 {Result, DAG.getConstant(0, DL, MVT::i32), ZTFrameIdx}); in LowerCall()
8962 // Finally reset the TPIDR2_EL0 register to 0. in LowerCall()
8966 DAG.getConstant(0, DL, MVT::i64)); in LowerCall()
8971 for (unsigned I = 0; I < InVals.size(); ++I) { in LowerCall()
9029 for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size(); in LowerReturn()
9109 SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg, in LowerReturn()
9134 RetOps[0] = Chain; // Update chain. in LowerReturn()
9147 getAddr(cast<ExternalSymbolSDNode>(Arm64ECRetDest), DAG, 0); in LowerReturn()
9151 RetOps.insert(RetOps.begin() + 2, DAG.getTargetConstant(0, DL, MVT::i32)); in LowerReturn()
9185 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag); in getTargetNode()
9255 assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 && in LowerGlobalAddress()
9260 if ((OpFlags & AArch64II::MO_GOT) != 0) { in LowerGlobalAddress()
9321 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); in LowerDarwinGlobalTLSAddress()
9358 // With ptrauth-calls, the tlv access thunk pointer is authenticated (IA, 0). in LowerDarwinGlobalTLSAddress()
9362 Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64)); // Integer Disc. in LowerDarwinGlobalTLSAddress()
9391 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_PAGEOFF); in LowerELFTLSLocalExec()
9394 DAG.getTargetConstant(0, DL, MVT::i32)), in LowerELFTLSLocalExec()
9395 0); in LowerELFTLSLocalExec()
9403 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); in LowerELFTLSLocalExec()
9405 GV, DL, PtrVT, 0, in LowerELFTLSLocalExec()
9409 DAG.getTargetConstant(0, DL, MVT::i32)), in LowerELFTLSLocalExec()
9410 0); in LowerELFTLSLocalExec()
9413 DAG.getTargetConstant(0, DL, MVT::i32)), in LowerELFTLSLocalExec()
9414 0); in LowerELFTLSLocalExec()
9423 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G1); in LowerELFTLSLocalExec()
9425 GV, DL, PtrVT, 0, in LowerELFTLSLocalExec()
9429 0); in LowerELFTLSLocalExec()
9431 DAG.getTargetConstant(0, DL, MVT::i32)), in LowerELFTLSLocalExec()
9432 0); in LowerELFTLSLocalExec()
9443 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_G2); in LowerELFTLSLocalExec()
9445 GV, DL, PtrVT, 0, in LowerELFTLSLocalExec()
9448 GV, DL, PtrVT, 0, in LowerELFTLSLocalExec()
9452 0); in LowerELFTLSLocalExec()
9455 0); in LowerELFTLSLocalExec()
9457 DAG.getTargetConstant(0, DL, MVT::i32)), in LowerELFTLSLocalExec()
9458 0); in LowerELFTLSLocalExec()
9532 TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); in LowerELFGlobalTLSAddress()
9558 GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); in LowerELFGlobalTLSAddress()
9560 GV, DL, MVT::i64, 0, in LowerELFGlobalTLSAddress()
9564 DAG.getTargetConstant(0, DL, MVT::i32)), in LowerELFGlobalTLSAddress()
9565 0); in LowerELFGlobalTLSAddress()
9567 DAG.getTargetConstant(0, DL, MVT::i32)), in LowerELFGlobalTLSAddress()
9568 0); in LowerELFGlobalTLSAddress()
9574 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS); in LowerELFGlobalTLSAddress()
9596 // A pointer to the TLS array is located at offset 0x58 from the TEB. in LowerWindowsGlobalTLSAddress()
9598 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL)); in LowerWindowsGlobalTLSAddress()
9629 GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12); in LowerWindowsGlobalTLSAddress()
9631 GV, DL, PtrVT, 0, in LowerWindowsGlobalTLSAddress()
9637 DAG.getTargetConstant(0, DL, MVT::i32)), in LowerWindowsGlobalTLSAddress()
9638 0); in LowerWindowsGlobalTLSAddress()
9698 if (TGN->getOffset() != 0) in LowerPtrAuthGlobalAddressStatically()
9708 0); in LowerPtrAuthGlobalAddressStatically()
9714 SDValue Ptr = Op.getOperand(0); in LowerPtrAuthGlobalAddress()
9722 report_fatal_error("key in ptrauth global out of range [0, " + in LowerPtrAuthGlobalAddress()
9728 "constant discriminator in ptrauth global out of range [0, 0xffff]"); in LowerPtrAuthGlobalAddress()
9734 int64_t PtrOffsetC = 0; in LowerPtrAuthGlobalAddress()
9737 Ptr = Ptr.getOperand(0); in LowerPtrAuthGlobalAddress()
9745 const bool NeedsGOTLoad = ((OpFlags & AArch64II::MO_GOT) != 0); in LowerPtrAuthGlobalAddress()
9746 assert(((OpFlags & (~AArch64II::MO_GOT)) == 0) && in LowerPtrAuthGlobalAddress()
9752 /*TargetFlags=*/0); in LowerPtrAuthGlobalAddress()
9753 assert(PtrN->getTargetFlags() == 0 && in LowerPtrAuthGlobalAddress()
9768 0); in LowerPtrAuthGlobalAddress()
9777 0); in LowerPtrAuthGlobalAddress()
9790 return {Val.getOperand(0), in lookThroughSignExtension()
9795 return {Val.getOperand(0), in lookThroughSignExtension()
9796 Val.getOperand(0)->getValueType(0).getFixedSizeInBits() - 1}; in lookThroughSignExtension()
9802 SDValue Chain = Op.getOperand(0); in LowerBR_CC()
9825 RHS = DAG.getConstant(0, dl, LHS.getValueType()); in LowerBR_CC()
9835 if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0))) in LowerBR_CC()
9841 std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG); in LowerBR_CC()
9858 if (RHSC && RHSC->getZExtValue() == 0 && ProduceNonFlagSettingCondBr) { in LowerBR_CC()
9867 SDValue Test = LHS.getOperand(0); in LowerBR_CC()
9883 SDValue Test = LHS.getOperand(0); in LowerBR_CC()
9948 SDValue In1 = Op.getOperand(0); in LowerFCOPYSIGN()
10046 SDValue Val = Op.getOperand(0); in LowerCTPOP_PARITY()
10061 // UMOV X0, V0.B[0] // copy byte result back to integer reg in LowerCTPOP_PARITY()
10070 DAG.getConstant(0, DL, MVT::i64)); in LowerCTPOP_PARITY()
10085 DAG.getConstant(0, DL, MVT::i64)); in LowerCTPOP_PARITY()
10107 SDValue Zeros = DAG.getConstant(0, DL, DT); in LowerCTPOP_PARITY()
10144 SDValue RBIT = DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(0)); in LowerCTTZ()
10189 SDValue Op0 = Op.getOperand(0); in LowerMinMax()
10214 REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0)); in LowerBitreverse()
10221 REVB = DAG.getNode(AArch64ISD::REV32, DL, VST, Op.getOperand(0)); in LowerBitreverse()
10228 REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0)); in LowerBitreverse()
10235 REVB = DAG.getNode(AArch64ISD::REV64, DL, VST, Op.getOperand(0)); in LowerBitreverse()
10254 N = N->getOperand(0); in isOrXorChain()
10258 WorkList.push_back(std::make_pair(N->getOperand(0), N->getOperand(1))); in isOrXorChain()
10267 if (isOrXorChain(N->getOperand(0), Num, WorkList) && in isOrXorChain()
10275 SDValue LHS = N->getOperand(0); in performOrXorChainCombine()
10278 EVT VT = N->getValueType(0); in performOrXorChainCombine()
10286 // Try to express conjunction "cmp 0 (or (xor A0 A1) (xor B0 B1))" as: in performOrXorChainCombine()
10287 // sub A0, A1; ccmp B0, B1, 0, eq; cmp inv(Cond) flag in performOrXorChainCombine()
10288 unsigned NumXors = 0; in performOrXorChainCombine()
10293 std::tie(XOR0, XOR1) = WorkList[0]; in performOrXorChainCombine()
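
The transformation at lines 10286-10287 folds a tree of XORs ORed together into a CMP/CCMP chain; semantically it is just De Morgan over equality:

    #include <cstdio>

    int main() {
      int A0 = 1, A1 = 1, B0 = 2, B1 = 3;
      bool OrXor = ((A0 ^ A1) | (B0 ^ B1)) == 0; // the matched DAG pattern
      bool Chain = (A0 == A1) && (B0 == B1);     // what cmp; ccmp; b.cond does
      printf("%d %d\n", OrXor, Chain);           // always identical
    }
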
10316 unsigned OpNo = IsStrict ? 1 : 0; in LowerSETCC()
10319 Chain = Op.getOperand(0); in LowerSETCC()
10320 SDValue LHS = Op.getOperand(OpNo + 0); in LowerSETCC()
10328 SDValue FVal = DAG.getConstant(0, dl, VT); in LowerSETCC()
10400 SDValue LHS = Op.getOperand(0); in LowerSETCCCARRY()
10415 SDValue FVal = DAG.getConstant(0, DL, OpVT); in LowerSETCCCARRY()
10439 RHS = DAG.getConstant(0, dl, LHS.getValueType()); in LowerSELECT_CC()
10472 // Check for SMAX(lhs, 0) and SMIN(lhs, 0) patterns. in LowerSELECT_CC()
10473 // (SELECT_CC setgt, lhs, 0, lhs, 0) -> (BIC lhs, (SRA lhs, typesize-1)) in LowerSELECT_CC()
10474 // (SELECT_CC setlt, lhs, 0, lhs, 0) -> (AND lhs, (SRA lhs, typesize-1)) in LowerSELECT_CC()
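
Lines 10473-10474 are the classic branch-free clamp against zero: an arithmetic shift by width-1 broadcasts the sign bit, then BIC/AND select. Checked directly (the right shift of a negative value is arithmetic on AArch64, and guaranteed by the language since C++20):

    #include <cstdint>
    #include <cstdio>

    int main() {
      for (int32_t X : {42, -42}) {
        int32_t Sign = X >> 31;   // 0 for non-negative, -1 for negative
        int32_t SMax = X & ~Sign; // (BIC lhs, (SRA lhs, 31))
        int32_t SMin = X & Sign;  // (AND lhs, (SRA lhs, 31))
        printf("x=%d smax(x,0)=%d smin(x,0)=%d\n", X, SMax, SMin);
      }
    }
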
10511 // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so in LowerSELECT_CC()
10513 if (isNullConstant(TVal.getOperand(0))) { in LowerSELECT_CC()
10598 FVal = DAG.getConstant(0, dl, FVal.getValueType()); in LowerSELECT_CC()
10669 if (Ty.isScalableVector() && IdxVal < 0 && in LowerVECTOR_SPLICE()
10680 return DAG.getNode(AArch64ISD::SPLICE, DL, Ty, Pred, Op.getOperand(0), in LowerVECTOR_SPLICE()
10686 if (IdxVal >= 0 && (IdxVal * BlockSize / 8) < 256) in LowerVECTOR_SPLICE()
10695 SDValue LHS = Op.getOperand(0); in LowerSELECT_CC()
10705 SDValue CCVal = Op->getOperand(0); in LowerSELECT()
10740 if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0))) in LowerSELECT()
10745 std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG); in LowerSELECT()
10756 LHS = CCVal.getOperand(0); in LowerSELECT()
10761 RHS = DAG.getConstant(0, DL, CCVal.getValueType()); in LowerSELECT()
10830 X16Copy.getValue(0), X16Copy.getValue(1)); in LowerBR_JT()
10831 return SDValue(B, 0); in LowerBR_JT()
10837 SDValue JTInfo = DAG.getJumpTableDebugInfo(JTI, Op.getOperand(0), DL); in LowerBR_JT()
10838 return DAG.getNode(ISD::BRIND, DL, MVT::Other, JTInfo, SDValue(Dest, 0)); in LowerBR_JT()
10842 SDValue Chain = Op.getOperand(0); in LowerBRIND()
10865 return SDValue(BrA, 0); in LowerBRIND()
10896 SDValue TargetBA = DAG.getTargetBlockAddress(BA, BAN->getValueType(0)); in LowerBlockAddress()
10906 return DAG.getCopyFromReg(SDValue(MOV, 0), DL, AArch64::X16, MVT::i64, in LowerBlockAddress()
10930 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), in LowerDarwin_VASTART()
10948 if (FuncInfo->getVarArgsGPRSize() > 0) in LowerWin64_VASTART()
10955 FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0 in LowerWin64_VASTART()
10961 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), in LowerWin64_VASTART()
10976 SDValue Chain = Op.getOperand(0); in LowerAAPCS_VASTART()
10981 // void *__stack at offset 0 in LowerAAPCS_VASTART()
10982 unsigned Offset = 0; in LowerAAPCS_VASTART()
10991 if (GPRSize > 0) { in LowerAAPCS_VASTART()
11010 if (FPRSize > 0) { in LowerAAPCS_VASTART()
11070 return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1), Op.getOperand(2), in LowerVACOPY()
11084 SDValue Chain = Op.getOperand(0); in LowerVAARG()
11137 DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0), in LowerVAARG()
11154 unsigned Depth = Op.getConstantOperandVal(0); in LowerFRAMEADDR()
11174 int FI = MFI.CreateFixedObject(4, 0, false); in LowerSPONENTRY()
11191 Reg = 0; in getRegisterByName()
11221 unsigned Depth = Op.getConstantOperandVal(0); in LowerRETURNADDR()
11249 return SDValue(St, 0); in LowerRETURNADDR()
11275 // values. Let's say we wanted to encode 0xR3FC0 which is 1.5 in BF16. We will in isFPImmLegal()
11276 // end up encoding this as the imm8 0x7f. This imm8 will be expanded to the in isFPImmLegal()
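
0xR3FC0 is LLVM IR syntax for a bfloat literal; bf16 is simply the top half of an f32, so the claim that 0x3FC0 is 1.5 can be checked in a few lines:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint16_t BF = 0x3FC0;               // sign 0, exponent 127, mantissa .1
      uint32_t Bits = uint32_t(BF) << 16; // widen to the f32 bit pattern
      float F;
      std::memcpy(&F, &Bits, sizeof F);
      printf("%g\n", F);                  // prints 1.5
    }
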
11335 ? 0 in getEstimate()
11378 for (int i = ExtraSteps; i > 0; --i) { in getSqrtEstimate()
11387 ExtraSteps = 0; in getSqrtEstimate()
11408 for (int i = ExtraSteps; i > 0; --i) { in getRecipEstimate()
11414 ExtraSteps = 0; in getRecipEstimate()
11556 AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32), in getSETCC()
11557 DAG.getConstant(0, DL, MVT::i32), in getSETCC()
11598 switch (Constraint[0]) { in getConstraintType()
11671 switch (Constraint[0]) { in getRegForInlineAsmConstraint()
11674 return std::make_pair(0U, nullptr); in getRegForInlineAsmConstraint()
11676 return std::make_pair(0U, &AArch64::GPR64x8ClassRegClass); in getRegForInlineAsmConstraint()
11678 return std::make_pair(0U, &AArch64::GPR64commonRegClass); in getRegForInlineAsmConstraint()
11679 return std::make_pair(0U, &AArch64::GPR32commonRegClass); in getRegForInlineAsmConstraint()
11685 return std::make_pair(0U, &AArch64::ZPRRegClass); in getRegForInlineAsmConstraint()
11686 return std::make_pair(0U, nullptr); in getRegForInlineAsmConstraint()
11692 return std::make_pair(0U, &AArch64::FPR16RegClass); in getRegForInlineAsmConstraint()
11694 return std::make_pair(0U, &AArch64::FPR32RegClass); in getRegForInlineAsmConstraint()
11696 return std::make_pair(0U, &AArch64::FPR64RegClass); in getRegForInlineAsmConstraint()
11698 return std::make_pair(0U, &AArch64::FPR128RegClass); in getRegForInlineAsmConstraint()
11707 return std::make_pair(0U, &AArch64::ZPR_4bRegClass); in getRegForInlineAsmConstraint()
11709 return std::make_pair(0U, &AArch64::FPR128_loRegClass); in getRegForInlineAsmConstraint()
11715 return std::make_pair(0U, &AArch64::ZPR_3bRegClass); in getRegForInlineAsmConstraint()
11721 return std::make_pair(0U, RegClass); in getRegForInlineAsmConstraint()
11725 return std::make_pair(0U, RegClass); in getRegForInlineAsmConstraint()
11747 if ((Size == 4 || Size == 5) && Constraint[0] == '{' && in getRegForInlineAsmConstraint()
11751 if (!Failed && RegNo >= 0 && RegNo <= 31) { in getRegForInlineAsmConstraint()
11769 return std::make_pair(0U, nullptr); in getRegForInlineAsmConstraint()
11794 char ConstraintLetter = Constraint[0]; in LowerAsmOperandForConstraint()
11802 // 'z' maps to xzr or wzr so it needs an input of 0. in LowerAsmOperandForConstraint()
11833 // i.e. 0 to 4095 with optional shift by 12 in LowerAsmOperandForConstraint()
11854 // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but in LowerAsmOperandForConstraint()
11855 // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice in LowerAsmOperandForConstraint()
11868 // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca in LowerAsmOperandForConstraint()
11869 // (M) or 64-bit 0x1234000000000000 (N) etc. in LowerAsmOperandForConstraint()
11876 if ((CVal & 0xFFFF) == CVal) in LowerAsmOperandForConstraint()
11878 if ((CVal & 0xFFFF0000ULL) == CVal) in LowerAsmOperandForConstraint()
11881 if ((NCVal & 0xFFFFULL) == NCVal) in LowerAsmOperandForConstraint()
11883 if ((NCVal & 0xFFFF0000ULL) == NCVal) in LowerAsmOperandForConstraint()
11890 if ((CVal & 0xFFFFULL) == CVal) in LowerAsmOperandForConstraint()
11892 if ((CVal & 0xFFFF0000ULL) == CVal) in LowerAsmOperandForConstraint()
11894 if ((CVal & 0xFFFF00000000ULL) == CVal) in LowerAsmOperandForConstraint()
11896 if ((CVal & 0xFFFF000000000000ULL) == CVal) in LowerAsmOperandForConstraint()
11899 if ((NCVal & 0xFFFFULL) == NCVal) in LowerAsmOperandForConstraint()
11901 if ((NCVal & 0xFFFF0000ULL) == NCVal) in LowerAsmOperandForConstraint()
11903 if ((NCVal & 0xFFFF00000000ULL) == NCVal) in LowerAsmOperandForConstraint()
11905 if ((NCVal & 0xFFFF000000000000ULL) == NCVal) in LowerAsmOperandForConstraint()
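
Lines 11890-11905 spell out the 'N' constraint: the constant (for MOVZ) or its complement (for MOVN) must live entirely in one 16-bit aligned halfword. Compactly (hypothetical helper, mirroring the checks above):

    #include <cstdint>
    #include <cstdio>

    static bool singleMovz(uint64_t C) {
      for (int Shift = 0; Shift < 64; Shift += 16)
        if ((C & (0xFFFFULL << Shift)) == C)
          return true;
      return false;
    }

    int main() {
      printf("%d\n", singleMovz(0x12340000));         // 1: MOVZ w0, #0x1234, lsl #16
      printf("%d\n", singleMovz(0x1234000000000000)); // 1: MOVZ x0, #0x1234, lsl #48
      printf("%d\n", singleMovz(~0xffffedcaULL & 0xFFFFFFFF)); // 1: 0xffffedca is a MOVN
    }
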
11940 V64Reg, DAG.getConstant(0, DL, MVT::i64)); in WidenVector()
11973 for (unsigned i = 0; i < NumElts; ++i) { in ReconstructShuffleWithRuntimeMask()
11978 SDValue OperandSourceVec = V.getOperand(0); in ReconstructShuffleWithRuntimeMask()
11993 MaskSource = MaskSource->getOperand(0); in ReconstructShuffleWithRuntimeMask()
12002 MaskSource = MaskSource.getOperand(0); in ReconstructShuffleWithRuntimeMask()
12015 MaskSourceVec = MaskSource->getOperand(0); in ReconstructShuffleWithRuntimeMask()
12018 } else if (MaskSourceVec != MaskSource->getOperand(0)) { in ReconstructShuffleWithRuntimeMask()
12070 : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0), in ReconstructShuffle()
12071 ShuffleVec(Vec), WindowBase(0), WindowScale(1) {} in ReconstructShuffle()
12079 for (unsigned i = 0; i < NumElts; ++i) { in ReconstructShuffle()
12085 V.getOperand(0).getValueType().isScalableVector()) { in ReconstructShuffle()
12095 SDValue SourceVec = V.getOperand(0); in ReconstructShuffle()
12113 for (unsigned I = 0; I < NumElts; ++I) { in ReconstructShuffle()
12116 for (unsigned OF = 0; OF < OutputFactor; OF++) in ReconstructShuffle()
12124 for (unsigned S = 0; S < Sources.size(); S++) { in ReconstructShuffle()
12125 if (V.getOperand(0) == Sources[S].Vec) { in ReconstructShuffle()
12128 for (unsigned OF = 0; OF < OutputFactor; OF++) in ReconstructShuffle()
12142 for (unsigned i = 0; i < Sources.size(); i++) { in ReconstructShuffle()
12155 for (unsigned i = 0; i < Mask.size(); i++) in ReconstructShuffle()
12238 DAG.getConstant(0, dl, MVT::i64)); in ReconstructShuffle()
12243 DAG.getConstant(0, dl, MVT::i64)); in ReconstructShuffle()
12290 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) { in ReconstructShuffle()
12295 auto Src = find(Sources, Entry.getOperand(0)); in ReconstructShuffle()
12301 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType(); in ReconstructShuffle()
12312 for (int j = 0; j < LanesDefined; ++j) in ReconstructShuffle()
12323 for (unsigned i = 0; i < Sources.size(); ++i) in ReconstructShuffle()
12326 SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0], in ReconstructShuffle()
12347 if (M[0] < 0) in isSingletonEXTMask()
12350 Imm = M[0]; in isSingletonEXTMask()
12361 ExpectedElt = 0; in isSingletonEXTMask()
12363 if (M[i] < 0) in isSingletonEXTMask()
12380 for (unsigned X = 0; X < 4; X++) { in ReconstructTruncateFromBuildVector()
12381 // Check the first item in each group is an extract from lane 0 of a v4i32 in ReconstructTruncateFromBuildVector()
12385 (BaseExt.getOperand(0).getValueType() != MVT::v4i16 && in ReconstructTruncateFromBuildVector()
12386 BaseExt.getOperand(0).getValueType() != MVT::v4i32) || in ReconstructTruncateFromBuildVector()
12388 BaseExt.getConstantOperandVal(1) != 0) in ReconstructTruncateFromBuildVector()
12390 SDValue Base = BaseExt.getOperand(0); in ReconstructTruncateFromBuildVector()
12395 Ext.getOperand(0) != Base || in ReconstructTruncateFromBuildVector()
12408 V.getOperand(0).getOperand(0), V.getOperand(4).getOperand(0), in ReconstructTruncateFromBuildVector()
12409 V.getOperand(8).getOperand(0), V.getOperand(12).getOperand(0)}; in ReconstructTruncateFromBuildVector()
12414 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Trunc[0], Trunc[1]); in ReconstructTruncateFromBuildVector()
12433 if (BlockSize % VT.getScalarSizeInBits() != 0) in isWideDUPMask()
12435 if (VT.getSizeInBits() % BlockSize != 0) in isWideDUPMask()
12443 // [0, 1, 0, 1] or [2, 3, 2, 3] or [4, 5, 6, 7, 4, 5, 6, 7] where any element in isWideDUPMask()
12445 // lane indices of the duplicated block (i.e. [0, 1], [2, 3] and [4, 5, 6, 7] in isWideDUPMask()
12448 for (size_t BlockIndex = 0; BlockIndex < NumBlocks; BlockIndex++) in isWideDUPMask()
12449 for (size_t I = 0; I < NumEltsPerBlock; I++) { in isWideDUPMask()
12451 if (Elt < 0) in isWideDUPMask()
12456 if (BlockElts[I] < 0) in isWideDUPMask()
12467 auto FirstRealEltIter = find_if(BlockElts, [](int Elt) { return Elt >= 0; }); in isWideDUPMask()
12472 DupLaneOp = 0; in isWideDUPMask()
12481 // BlockElts[0] must have the following value if it isn't undef: in isWideDUPMask()
12485 if (Elt0 % NumEltsPerBlock != 0) in isWideDUPMask()
12489 for (size_t I = 0; I < NumEltsPerBlock; I++) in isWideDUPMask()
12490 if (BlockElts[I] >= 0 && (unsigned)BlockElts[I] != Elt0 + I) in isWideDUPMask()
12502 const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; }); in isEXTMask()
12520 // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>. in isEXTMask()
12526 // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>) in isEXTMask()
12527 // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>) in isEXTMask()
12528 // For both cases, we finally use mask <5, 6, 7, 0>, which requires in isEXTMask()
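
The mask <5, 6, 7, 0> that both cases above normalize to is a rotation of the concatenated operand pair, which is exactly what EXT implements (after swapping operands when the window starts in the second vector). The semantics in miniature:

    #include <cstdio>

    int main() {
      int A[4] = {0, 1, 2, 3}, B[4] = {4, 5, 6, 7};
      int Concat[8];
      for (int I = 0; I < 4; ++I) { Concat[I] = A[I]; Concat[I + 4] = B[I]; }
      int Out[4];
      unsigned Start = 5;                 // first element of mask <5,6,7,0>
      for (int I = 0; I < 4; ++I)
        Out[I] = Concat[(Start + I) % 8]; // sliding window, wrapping
      for (int V : Out)
        printf("%d ", V);                 // prints: 5 6 7 0
    }
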
12540 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
12543 if (NumElts % 2 != 0) in isZIP_v_undef_Mask()
12545 WhichResult = (M[0] == 0 ? 0 : 1); in isZIP_v_undef_Mask()
12547 for (unsigned i = 0; i != NumElts; i += 2) { in isZIP_v_undef_Mask()
12548 if ((M[i] >= 0 && (unsigned)M[i] != Idx) || in isZIP_v_undef_Mask()
12549 (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx)) in isZIP_v_undef_Mask()
12559 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
12562 WhichResult = (M[0] == 0 ? 0 : 1); in isUZP_v_undef_Mask()
12563 for (unsigned j = 0; j != 2; ++j) { in isUZP_v_undef_Mask()
12565 for (unsigned i = 0; i != Half; ++i) { in isUZP_v_undef_Mask()
12567 if (MIdx >= 0 && (unsigned)MIdx != Idx) in isUZP_v_undef_Mask()
12578 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
12581 if (NumElts % 2 != 0) in isTRN_v_undef_Mask()
12583 WhichResult = (M[0] == 0 ? 0 : 1); in isTRN_v_undef_Mask()
12584 for (unsigned i = 0; i < NumElts; i += 2) { in isTRN_v_undef_Mask()
12585 if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || in isTRN_v_undef_Mask()
12586 (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult)) in isTRN_v_undef_Mask()
12597 int NumLHSMatch = 0, NumRHSMatch = 0; in isINSMask()
12600 for (int i = 0; i < NumInputElements; ++i) { in isINSMask()
12637 for (int I = 0, E = NumElts / 2; I != E; I++) { in isConcatMask()
12654 SDValue V0 = Op.getOperand(0); in tryFormConcatFromShuffle()
12670 DAG.getConstant(0, DL, MVT::i64)); in tryFormConcatFromShuffle()
12674 DAG.getConstant(0, DL, MVT::i64)); in tryFormConcatFromShuffle()
12688 unsigned OpNum = (PFEntry >> 26) & 0x0F; in GeneratePerfectShuffle()
12690 unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1); in GeneratePerfectShuffle()
12693 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> in GeneratePerfectShuffle()
12723 while (Elt > 0) { in GeneratePerfectShuffle()
12737 unsigned ExtLane = 0; in GeneratePerfectShuffle()
12740 // OP_MOVLANE ops are either D movs (if bit 0x4 is set) or S movs. D movs in GeneratePerfectShuffle()
12742 if (RHSID & 0x4) { in GeneratePerfectShuffle()
12743 int MaskElt = getPFIDLane(ID, (RHSID & 0x01) << 1) >> 1; in GeneratePerfectShuffle()
12745 MaskElt = (getPFIDLane(ID, ((RHSID & 0x01) << 1) + 1) - 1) >> 1; in GeneratePerfectShuffle()
12746 assert(MaskElt >= 0 && "Didn't expect an undef movlane index!"); in GeneratePerfectShuffle()
12760 assert(MaskElt >= 0 && "Didn't expect an undef movlane index!"); in GeneratePerfectShuffle()
12774 Ext, DAG.getVectorIdxConstant(RHSID & 0x3, dl)); in GeneratePerfectShuffle()
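
Lines 12688-12690 decode one entry of the perfect-shuffle table: a 4-bit opcode plus two 13-bit operand IDs packed into the low 30 bits. Decoding a made-up entry:

    #include <cstdio>

    int main() {
      unsigned PFEntry = (3u << 26) | (123u << 13) | 45u; // hypothetical entry
      unsigned OpNum = (PFEntry >> 26) & 0x0F;
      unsigned LHSID = (PFEntry >> 13) & ((1u << 13) - 1);
      unsigned RHSID = (PFEntry >> 0) & ((1u << 13) - 1);
      printf("op=%u lhs=%u rhs=%u\n", OpNum, LHSID, RHSID); // op=3 lhs=123 rhs=45
    }
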
12848 SDValue V1 = Op.getOperand(0); in GenerateTBL()
12862 // out-of-range values with 0s. We do need to make sure that any out-of-range in GenerateTBL()
12874 for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) { in GenerateTBL()
12907 // DAG.getBuildVector(IndexVT, DL, &TBLMask[0], in GenerateTBL()
12938 BitCast.getOperand(0).getOpcode() != ISD::EXTRACT_SUBVECTOR) in constructDup()
12943 SDValue Extract = BitCast.getOperand(0); in constructDup()
12948 if (ExtIdxInBits % CastedEltBitWidth != 0) in constructDup()
12952 if (!Extract.getOperand(0).getValueType().is128BitVector()) in constructDup()
12964 Extract.getOperand(0).getValueSizeInBits() / CastedEltBitWidth; in constructDup()
12971 V = DAG.getBitcast(CastVT, V.getOperand(0).getOperand(0)); in constructDup()
12973 V.getOperand(0).getValueType().is128BitVector()) { in constructDup()
12977 V = V.getOperand(0); in constructDup()
12997 if (NumElts % 2 != 0) in isWideTypeMask()
13001 for (unsigned i = 0; i < NumElts; i += 2) { in isWideTypeMask()
13016 if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) { in isWideTypeMask()
13038 // mov v0.d[0], v1.d[1]
13044 SDValue V0 = Op.getOperand(0); in tryWidenMaskForShuffle()
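
isWideTypeMask (line 13016) accepts a lane pair when it is an adjacent even/odd pair (or undef), letting a v4i32 shuffle run as a cheaper v2i64 one. For example:

    #include <cstdio>

    int main() {
      int M[4] = {2, 3, 6, 7}; // v4i32 mask over concat(V1, V2)
      int Wide[2];
      for (int I = 0; I < 4; I += 2) {
        int M0 = M[I], M1 = M[I + 1];
        if (M0 < 0 || M0 % 2 != 0 || M1 != M0 + 1) { // undef cases elided here
          printf("not widenable\n");
          return 0;
        }
        Wide[I / 2] = M0 / 2;
      }
      printf("v2i64 mask <%d, %d>\n", Wide[0], Wide[1]); // <1, 3>
    }
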
13075 SDValue Tbl1 = Op->getOperand(0); in tryToConvertShuffleOfTbl2ToTbl4()
13083 Tbl1->getOperand(0) != Tbl2ID || in tryToConvertShuffleOfTbl2ToTbl4()
13085 Tbl2->getOperand(0) != Tbl2ID) in tryToConvertShuffleOfTbl2ToTbl4()
13088 if (Tbl1->getValueType(0) != MVT::v16i8 || in tryToConvertShuffleOfTbl2ToTbl4()
13089 Tbl2->getValueType(0) != MVT::v16i8) in tryToConvertShuffleOfTbl2ToTbl4()
13095 for (unsigned I = 0; I < 16; I++) { in tryToConvertShuffleOfTbl2ToTbl4()
13124 SDValue SrcOp = Op.getOperand(0); in LowerZERO_EXTEND_VECTOR_INREG()
13126 assert(VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits() == 0 && in LowerZERO_EXTEND_VECTOR_INREG()
13132 SDValue Zeros = DAG.getConstant(0, dl, SrcVT); in LowerZERO_EXTEND_VECTOR_INREG()
13153 SDValue V1 = Op.getOperand(0); in LowerVECTOR_SHUFFLE()
13167 Lane = 0; in LowerVECTOR_SHUFFLE()
13169 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) in LowerVECTOR_SHUFFLE()
13171 V1.getOperand(0)); in LowerVECTOR_SHUFFLE()
13185 unsigned Lane = 0; in LowerVECTOR_SHUFFLE()
13234 unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; in LowerVECTOR_SHUFFLE()
13238 unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; in LowerVECTOR_SHUFFLE()
13242 unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; in LowerVECTOR_SHUFFLE()
13247 unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2; in LowerVECTOR_SHUFFLE()
13251 unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; in LowerVECTOR_SHUFFLE()
13255 unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; in LowerVECTOR_SHUFFLE()
13295 for (unsigned i = 0; i != 4; ++i) { in LowerVECTOR_SHUFFLE()
13296 if (ShuffleMask[i] < 0) in LowerVECTOR_SHUFFLE()
13303 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 + in LowerVECTOR_SHUFFLE()
13324 if (isa<ConstantSDNode>(Op.getOperand(0))) in LowerSPLAT_VECTOR()
13330 SDValue SplatVal = DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, MVT::i64); in LowerSPLAT_VECTOR()
13335 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in LowerSPLAT_VECTOR()
13370 // svand_x(svptrue_b64(), svindex_u64(0, 1), 1), in LowerDUPQLane()
13375 // create the vector 0,1,0,1,... in LowerDUPQLane()
13392 EVT VT = BVN->getValueType(0); in resolveBuildVector()
13399 for (unsigned i = 0; i < NumSplats; ++i) { in resolveBuildVector()
13450 Shift = 0; in tryAdvSIMDModImm32()
13503 Shift = 0; in tryAdvSIMDModImm16()
13622 ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0)); in isAllConstantBuildVector()
13625 EVT VT = Bvec->getValueType(0); in isAllConstantBuildVector()
13637 N = N.getOperand(0); in isAllInactivePredicate()
13647 N = N.getOperand(0); in isAllActivePredicate()
13661 N.getConstantOperandVal(0) == AArch64SVEPredPattern::all) in isAllActivePredicate()
13673 getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0)); in isAllActivePredicate()
13688 EVT VT = N->getValueType(0); in tryLowerToSLI()
13698 SDValue FirstOp = N->getOperand(0); in tryLowerToSLI()
13733 if (!isAllActivePredicate(DAG, Shift.getOperand(0))) in tryLowerToSLI()
13771 SDValue X = And.getOperand(0); in tryLowerToSLI()
13772 SDValue Y = ShiftHasPredOp ? Shift.getOperand(1) : Shift.getOperand(0); in tryLowerToSLI()
13802 SDValue LHS = Op.getOperand(0); in LowerVectorOR()
13808 BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode()); in LowerVectorOR()
13813 APInt DefBits(VT.getSizeInBits(), 0); in LowerVectorOR()
13814 APInt UndefBits(VT.getSizeInBits(), 0); in LowerVectorOR()
13874 APInt DefBits(VT.getSizeInBits(), 0); in ConstantBuildVector()
13875 APInt UndefBits(VT.getSizeInBits(), 0); in ConstantBuildVector()
13910 assert(VT.getSizeInBits() % FVT.getScalarSizeInBits() == 0); in ConstantBuildVector()
13913 APInt NegBits(VT.getSizeInBits(), 0); in ConstantBuildVector()
13915 for (unsigned i = 0; i < NumElts; i++) in ConstantBuildVector()
14006 unsigned NumConstantLanes = 0; in LowerBUILD_VECTOR()
14007 unsigned NumDifferentLanes = 0; in LowerBUILD_VECTOR()
14008 unsigned NumUndefLanes = 0; in LowerBUILD_VECTOR()
14012 unsigned ConsecutiveValCount = 0; in LowerBUILD_VECTOR()
14014 for (unsigned i = 0; i < NumElts; ++i) { in LowerBUILD_VECTOR()
14022 if (i > 0) in LowerBUILD_VECTOR()
14043 ConsecutiveValCount = 0; in LowerBUILD_VECTOR()
14080 // Check whether the extract elements match the Even pattern <0,2,4,...> or in LowerBUILD_VECTOR()
14082 for (unsigned i = 0; i < NumElts; ++i) { in LowerBUILD_VECTOR()
14090 SDValue N0 = N->getOperand(0); in LowerBUILD_VECTOR()
14106 // Extracted values are either at Even indices <0,2,4,...> or at Odd in LowerBUILD_VECTOR()
14125 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0), in LowerBUILD_VECTOR()
14126 DAG.getConstant(0, dl, MVT::i64)); in LowerBUILD_VECTOR()
14128 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0), in LowerBUILD_VECTOR()
14152 Value = Value.getOperand(0); in LowerBUILD_VECTOR()
14173 for (unsigned i = 0; i < NumElts; ++i) in LowerBUILD_VECTOR()
14198 if (!PreferDUPAndInsert && NumConstantLanes > 0 && usesOnlyOneConstantValue) { in LowerBUILD_VECTOR()
14202 APInt ConstantValueAPInt(1, 0); in LowerBUILD_VECTOR()
14214 for (unsigned i = 0; i < NumElts; ++i) { in LowerBUILD_VECTOR()
14253 for (unsigned I = 0; I < NumElts; ++I) in LowerBUILD_VECTOR()
14264 if (DifferentValueMap.size() == 2 && NumUndefLanes == 0) { in LowerBUILD_VECTOR()
14297 SmallVector<SDValue, 8> Ops1(NumElts / 2, Vals[0]); in LowerBUILD_VECTOR()
14315 // t29: v8i8 = vector_shuffle<0,1,2,3,12,13,14,15> t27, t28 in LowerBUILD_VECTOR()
14319 SDValue FirstLaneVal = Op.getOperand(0); in LowerBUILD_VECTOR()
14320 for (unsigned i = 0; i < NumElts; ++i) { in LowerBUILD_VECTOR()
14328 SmallVector<SDValue, 8> Ops1(NumElts, Vals[0]); in LowerBUILD_VECTOR()
14350 SDValue Op0 = Op.getOperand(0); in LowerBUILD_VECTOR()
14351 unsigned i = 0; in LowerBUILD_VECTOR()
14396 if (isTypeLegal(Op.getOperand(0).getValueType())) { in LowerCONCAT_VECTORS()
14407 for (unsigned I = 0, E = ConcatOps.size(); I != E; I += 2) { in LowerCONCAT_VECTORS()
14417 return ConcatOps[0]; in LowerCONCAT_VECTORS()
14431 EVT VT = Op.getOperand(0).getValueType(); in LowerINSERT_VECTOR_ELT()
14437 DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, VectorVT); in LowerINSERT_VECTOR_ELT()
14461 EVT VT = Op.getOperand(0).getValueType(); in LowerEXTRACT_VECTOR_ELT()
14469 DAG.getNode(ISD::ANY_EXTEND, DL, VectorVT, Op.getOperand(0)); in LowerEXTRACT_VECTOR_ELT()
14498 SDValue WideVec = WidenVector(Op.getOperand(0), DAG); in LowerEXTRACT_VECTOR_ELT()
14515 EVT InVT = Op.getOperand(0).getValueType(); in LowerEXTRACT_SUBVECTOR()
14526 if (Idx == 0) in LowerEXTRACT_SUBVECTOR()
14538 SDValue Vec = Op.getOperand(0); in LowerEXTRACT_SUBVECTOR()
14546 DAG.getVectorIdxConstant(0, DL)); in LowerEXTRACT_SUBVECTOR()
14571 SDValue Vec0 = Op.getOperand(0); in LowerINSERT_SUBVECTOR()
14587 DAG.getVectorIdxConstant(0, DL)); in LowerINSERT_SUBVECTOR()
14623 if (Idx == 0) { in LowerINSERT_SUBVECTOR()
14636 if (Idx == 0 && isPackedVectorType(VT, DAG)) { in LowerINSERT_SUBVECTOR()
14663 !isa<ConstantSDNode>(Op->getOperand(0))) in isPow2Splat()
14666 SplatVal = Op->getConstantOperandVal(0); in isPow2Splat()
14700 DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, dl, VT, Pg, Op->getOperand(0), in LowerDIV()
14703 Res = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), Res); in LowerDIV()
14723 SDValue Op0Lo = DAG.getNode(UnpkLo, dl, WidenedVT, Op.getOperand(0)); in LowerDIV()
14725 SDValue Op0Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(0)); in LowerDIV()
14757 return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || in isShuffleMaskLegal()
14784 Op = Op.getOperand(0); in getVShiftImm()
14799 /// 0 <= Value < ElementBits for a left shift; or
14800 /// 0 <= Value <= ElementBits for a long left shift.
14806 return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits); in isVShiftLImm()
14825 // Lower i1 truncate to `(x & 1) != 0`. in LowerTRUNCATE()
14827 EVT OpVT = Op.getOperand(0).getValueType(); in LowerTRUNCATE()
14828 SDValue Zero = DAG.getConstant(0, dl, OpVT); in LowerTRUNCATE()
14830 SDValue And = DAG.getNode(ISD::AND, dl, OpVT, Op.getOperand(0), One); in LowerTRUNCATE()
14837 if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(), in LowerTRUNCATE()
14866 SDValue Add = Shift->getOperand(0); in canLowerSRLToRoundingShiftForVT()
14885 RShOperand = Add->getOperand(0); in canLowerSRLToRoundingShiftForVT()
14906 return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0), in LowerVectorSRA_SRL_SHL()
14911 Op.getOperand(0), Op.getOperand(1)); in LowerVectorSRA_SRL_SHL()
14936 return DAG.getNode(Opc, DL, VT, Op.getOperand(0), in LowerVectorSRA_SRL_SHL()
14946 SDValue NegShift = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), in LowerVectorSRA_SRL_SHL()
14950 DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0), in LowerVectorSRA_SRL_SHL()
14967 unsigned SplatBitSize = 0; in EmitVectorComparison()
14974 bool IsZero = IsCnst && SplatValue == 0; in EmitVectorComparison()
15075 if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(), in LowerVSETCC()
15080 SDValue LHS = Op.getOperand(0); in LowerVSETCC()
15162 SDValue VecOp = ScalarOp.getOperand(0); in getReductionSDNode()
15165 DAG.getConstant(0, DL, MVT::i64)); in getReductionSDNode()
15254 for (unsigned Shift = NumElems / 2; Shift > 0; Shift /= 2) { in getVectorBitwiseReduce()
15270 SDValue Src = Op.getOperand(0); in LowerVECREDUCE()
15326 return getVectorBitwiseReduce(Op.getOpcode(), Op.getOperand(0), in LowerVECREDUCE()
15358 Op.getOperand(0), Op.getOperand(1), RHS, in LowerATOMIC_LOAD_AND()
15369 SDValue Chain = Op.getOperand(0); in LowerWindowsDYNAMIC_STACKALLOC()
15373 EVT VT = Node->getValueType(0); in LowerWindowsDYNAMIC_STACKALLOC()
15381 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), in LowerWindowsDYNAMIC_STACKALLOC()
15388 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); in LowerWindowsDYNAMIC_STACKALLOC()
15392 PtrVT, 0); in LowerWindowsDYNAMIC_STACKALLOC()
15418 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), in LowerWindowsDYNAMIC_STACKALLOC()
15422 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl); in LowerWindowsDYNAMIC_STACKALLOC()
15433 SDValue Chain = Op.getOperand(0); in LowerInlineDYNAMIC_STACKALLOC()
15439 EVT VT = Node->getValueType(0); in LowerInlineDYNAMIC_STACKALLOC()
15446 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), in LowerInlineDYNAMIC_STACKALLOC()
15483 APInt MulImm = Op.getConstantOperandAPInt(0); in LowerVSCALE()
15495 const EVT VT = TLI.getMemValueType(DL, CI.getArgOperand(0)->getType()); in setInfoSVEStN()
15499 for (unsigned I = 0; I < NumVecs; ++I) in setInfoSVEStN()
15507 Info.offset = 0; in setInfoSVEStN()
15538 Info.offset = 0; in getTgtMemIntrinsic()
15555 Type *VecTy = StructTy->getElementType(0); in getTgtMemIntrinsic()
15559 Info.offset = 0; in getTgtMemIntrinsic()
15572 unsigned NumElts = 0; in getTgtMemIntrinsic()
15581 Info.offset = 0; in getTgtMemIntrinsic()
15591 unsigned NumElts = 0; in getTgtMemIntrinsic()
15593 Type *VecTy = I.getArgOperand(0)->getType(); in getTgtMemIntrinsic()
15605 Info.offset = 0; in getTgtMemIntrinsic()
15613 Type *ValTy = I.getParamElementType(0); in getTgtMemIntrinsic()
15616 Info.ptrVal = I.getArgOperand(0); in getTgtMemIntrinsic()
15617 Info.offset = 0; in getTgtMemIntrinsic()
15628 Info.offset = 0; in getTgtMemIntrinsic()
15637 Info.ptrVal = I.getArgOperand(0); in getTgtMemIntrinsic()
15638 Info.offset = 0; in getTgtMemIntrinsic()
15647 Info.offset = 0; in getTgtMemIntrinsic()
15656 Info.offset = 0; in getTgtMemIntrinsic()
15663 cast<VectorType>(I.getArgOperand(0)->getType())->getElementType(); in getTgtMemIntrinsic()
15665 Info.memVT = MVT::getVT(I.getOperand(0)->getType()); in getTgtMemIntrinsic()
15667 Info.offset = 0; in getTgtMemIntrinsic()
15673 Value *Dst = I.getArgOperand(0); in getTgtMemIntrinsic()
15678 Info.offset = 0; in getTgtMemIntrinsic()
15679 Info.align = I.getParamAlign(0).valueOrOne(); in getTgtMemIntrinsic()
15730 SDValue Extract = Extend.getOperand(0); in shouldRemoveRedundantExtend()
15732 Extract = Extract.getOperand(0); in shouldRemoveRedundantExtend()
15734 EVT VecVT = Extract.getOperand(0).getValueType(); in shouldRemoveRedundantExtend()
15777 Type *Ty = User->getOperand(0)->getType(); in isProfitableToHoist()
15844 // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0. in isExtFreeImpl()
15854 if (ShiftAmt == 0 || ShiftAmt > 4) in isExtFreeImpl()
15861 if (Instr->getType() == Ext->getOperand(0)->getType()) in isExtFreeImpl()
15918 int M1Start = 0; in areExtractShuffleVectors()
15919 int M2Start = 0; in areExtractShuffleVectors()
15927 if ((M1Start != 0 && M1Start != (NumElements / 2)) || in areExtractShuffleVectors()
15928 (M2Start != 0 && M2Start != (NumElements / 2))) in areExtractShuffleVectors()
15941 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits(); in areExtractExts()
15975 Value *Base = GEP->getOperand(0); in shouldSinkVectorOfPtrs()
15986 OffsetsInst->getOperand(0)->getType()->getScalarSizeInBits() <= 32) in shouldSinkVectorOfPtrs()
16002 Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0)); in shouldSinkVScale()
16007 Value *ZExtOp = cast<Instruction>(Op)->getOperand(0); in shouldSinkVScale()
16008 Ops.push_back(&cast<Instruction>(ZExtOp)->getOperandUse(0)); in shouldSinkVScale()
16009 Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0)); in shouldSinkVScale()
16024 if (areExtractShuffleVectors(II->getOperand(0), II->getOperand(1), in shouldSinkOperands()
16026 Ops.push_back(&II->getOperandUse(0)); in shouldSinkOperands()
16042 if (isSplatShuffle(II->getOperand(0))) in shouldSinkOperands()
16043 Ops.push_back(&II->getOperandUse(0)); in shouldSinkOperands()
16059 if (auto *IIOp = dyn_cast<IntrinsicInst>(II->getOperand(0))) in shouldSinkOperands()
16061 Ops.push_back(&II->getOperandUse(0)); in shouldSinkOperands()
16104 if (!areExtractShuffleVectors(II->getOperand(0), II->getOperand(1))) in shouldSinkOperands()
16106 Ops.push_back(&II->getOperandUse(0)); in shouldSinkOperands()
16110 if (!areOperandsOfVmullHighP64(II->getArgOperand(0), in shouldSinkOperands()
16113 Ops.push_back(&II->getArgOperandUse(0)); in shouldSinkOperands()
16117 if (!shouldSinkVectorOfPtrs(II->getArgOperand(0), Ops)) in shouldSinkOperands()
16119 Ops.push_back(&II->getArgOperandUse(0)); in shouldSinkOperands()
16136 for (unsigned Op = 0; Op < I->getNumOperands(); ++Op) { in shouldSinkOperands()
16153 if (!areExtractExts(I->getOperand(0), I->getOperand(1))) in shouldSinkOperands()
16158 auto Ext1 = cast<Instruction>(I->getOperand(0)); in shouldSinkOperands()
16160 if (areExtractShuffleVectors(Ext1->getOperand(0), Ext2->getOperand(0))) { in shouldSinkOperands()
16161 Ops.push_back(&Ext1->getOperandUse(0)); in shouldSinkOperands()
16162 Ops.push_back(&Ext2->getOperandUse(0)); in shouldSinkOperands()
16165 Ops.push_back(&I->getOperandUse(0)); in shouldSinkOperands()
16182 Instruction *MainAnd = I->getOperand(0) == OtherAnd in shouldSinkOperands()
16184 : cast<Instruction>(I->getOperand(0)); in shouldSinkOperands()
16196 Ops.push_back(&MainAnd->getOperandUse(MainAnd->getOperand(0) == IA ? 1 : 0)); in shouldSinkOperands()
16197 Ops.push_back(&I->getOperandUse(0)); in shouldSinkOperands()
16208 int NumZExts = 0, NumSExts = 0; in shouldSinkOperands()
16228 match(Shuffle->getOperand(0), m_ZExtOrSExt(m_Value()))) { in shouldSinkOperands()
16229 Ops.push_back(&Shuffle->getOperandUse(0)); in shouldSinkOperands()
16231 if (match(Shuffle->getOperand(0), m_SExt(m_Value()))) in shouldSinkOperands()
16241 Value *ShuffleOperand = Shuffle->getOperand(0); in shouldSinkOperands()
16252 // Check that the insertelement is inserting into element 0 in shouldSinkOperands()
16262 // If we find that the top bits are known 0, then we can sink and allow in shouldSinkOperands()
16272 Ops.push_back(&Shuffle->getOperandUse(0)); in shouldSinkOperands()
16288 if (DstWidth % 8 != 0 || DstWidth <= 16 || DstWidth >= 64) in createTblShuffleMask()
16291 assert(DstWidth % SrcWidth == 0 && in createTblShuffleMask()
16301 unsigned SrcIndex = 0; in createTblShuffleMask()
16302 for (unsigned I = IsLittleEndian ? 0 : Factor - 1; I < MaskLen; I += Factor) in createTblShuffleMask()
16322 PoisonValue::get(SrcTy), Builder.getInt8(0), uint64_t(0)); in createTblShuffleForZExt()
16343 PoisonValue::get(SrcTy), Builder.getInt8(0), uint64_t(0)); in createTblShuffleForSExt()
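The mask-building loop visible above walks the destination bytes in strides of Factor (the ratio of destination to source element width), assigning consecutive source bytes to every Factor'th slot. A minimal standalone sketch of that indexing pattern, with the assumption (made explicit here, not taken from the listing) that the remaining mask entries point at a byte the caller has arranged to be zero:

  // Illustration only, not the LLVM helper: build a tbl mask that widens
  // 8-bit elements by Factor, sending source byte j to destination byte
  // j*Factor (little-endian) and aiming all other entries at a zero byte.
  #include <cstdio>
  #include <vector>

  static std::vector<int> tblZExtMask(int NumSrcBytes, int Factor,
                                      bool IsLittleEndian) {
    int MaskLen = NumSrcBytes * Factor;
    std::vector<int> Mask(MaskLen, NumSrcBytes); // assumed zero-byte index
    int SrcIndex = 0;
    for (int I = IsLittleEndian ? 0 : Factor - 1; I < MaskLen; I += Factor)
      Mask[I] = SrcIndex++;
    return Mask;
  }

  int main() {
    // zext <8 x i8> to <8 x i16>, little-endian.
    for (int M : tblZExtMask(8, 2, /*IsLittleEndian=*/true))
      printf("%d ", M); // 0 8 1 8 2 8 3 8 4 8 5 8 6 8 7 8
    printf("\n");
  }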
16352 auto *SrcTy = cast<FixedVectorType>(TI->getOperand(0)->getType()); in createTblForTrunc()
16362 assert((SrcElemTySz % DstElemTySz == 0) && in createTblForTrunc()
16373 // 0, 8, 16, ..., Y*8th bytes for the little-endian format in createTblForTrunc()
16375 for (int Itr = 0; Itr < 16; Itr++) { in createTblForTrunc()
16393 for (int i = 0; i < ShuffleCount; ++i) in createTblForTrunc()
16402 Builder.CreateShuffleVector(TI->getOperand(0), ShuffleLanes), VecTy)); in createTblForTrunc()
16412 for (int i = 0; i < ShuffleCount; ++i) in createTblForTrunc()
16444 Value *FinalResult = Results[0]; in createTblForTrunc()
16448 std::iota(FinalMask.begin(), FinalMask.end(), 0); in createTblForTrunc()
16449 FinalResult = Builder.CreateShuffleVector(Results[0], FinalMask); in createTblForTrunc()
16454 std::iota(FinalMask.begin(), FinalMask.begin() + ElemsPerTbl, 0); in createTblForTrunc()
16457 std::iota(FinalMask.begin(), FinalMask.end(), 0); in createTblForTrunc()
16460 Builder.CreateShuffleVector(Results[0], Results[1], FinalMask); in createTblForTrunc()
16483 auto *SrcTy = dyn_cast<FixedVectorType>(I->getOperand(0)->getType()); in optimizeExtendOrTruncateConversion()
16494 if (DstWidth % 8 != 0) in optimizeExtendOrTruncateConversion()
16512 Builder, ZExt->getOperand(0), cast<FixedVectorType>(ZExt->getType()), in optimizeExtendOrTruncateConversion()
16526 Builder, I->getOperand(0), FixedVectorType::getInteger(DstTy), in optimizeExtendOrTruncateConversion()
16539 auto *Shuffle = createTblShuffleForSExt(Builder, I->getOperand(0), in optimizeExtendOrTruncateConversion()
16559 auto *WideConv = Builder.CreateFPToUI(FPToUI->getOperand(0), in optimizeExtendOrTruncateConversion()
16642 return isPowerOf2_32(MinElts) && (MinElts * ElSize) % 128 == 0; in isLegalInterleavedAccessType()
16649 if (VecSize % MinSVEVectorSize == 0 || in isLegalInterleavedAccessType()
16659 return Subtarget->isNeonAvailable() && (VecSize == 64 || VecSize % 128 == 0); in isLegalInterleavedAccessType()
16726 /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements
16731 /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0
16744 VectorType *VTy = Shuffles[0]->getType(); in lowerInterleavedLoad()
16804 for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { in lowerInterleavedLoad()
16808 if (LoadCount > 0) in lowerInterleavedLoad()
16819 for (unsigned i = 0; i < Shuffles.size(); i++) { in lowerInterleavedLoad()
16828 ConstantInt::get(Type::getInt64Ty(VTy->getContext()), 0)); in lowerInterleavedLoad()
16847 SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0]; in lowerInterleavedLoad()
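The doc comment excerpted above (the <0, 2, 4, 6> shuffle) is the even-element extraction that lowerInterleavedLoad folds into an ld2. A small illustration of the strided masks being matched; this is a sketch of the mask shape only, not the in-tree matching code:

  // Strided extraction masks: Start = 0, Factor = 2 gives <0, 2, 4, 6>
  // (even elements); Start = 1 gives the odd elements.
  #include <cstdio>
  #include <vector>

  static std::vector<int> deinterleaveMask(int NumElts, int Start,
                                           int Factor) {
    std::vector<int> Mask(NumElts);
    for (int I = 0; I < NumElts; ++I)
      Mask[I] = Start + I * Factor;
    return Mask;
  }

  int main() {
    for (int M : deinterleaveMask(4, 0, 2))
      printf("%d ", M); // 0 2 4 6
    printf("\n");
  }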
16857 unsigned IdxWidth = DL.getIndexSizeInBits(0); in hasNearbyPairedStore()
16858 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0); in hasNearbyPairedStore()
16865 if (MaxLookupDist-- == 0) in hasNearbyPairedStore()
16885 /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
16889 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
16915 assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store"); in lowerInterleavedStore()
16932 Value *Op0 = SVI->getOperand(0); in lowerInterleavedStore()
16970 // A 64-bit st2 which does not start at element 0 will involve adding extra in lowerInterleavedStore()
16975 (Mask[0] != 0 || in lowerInterleavedStore()
17005 for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { in lowerInterleavedStore()
17010 for (unsigned i = 0; i < Factor; i++) { in lowerInterleavedStore()
17013 if (Mask[IdxI] >= 0) { in lowerInterleavedStore()
17015 Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)); in lowerInterleavedStore()
17017 unsigned StartMask = 0; in lowerInterleavedStore()
17020 if (Mask[IdxJ] >= 0) { in lowerInterleavedStore()
17027 // In the case of all undefs we're defaulting to using elems from 0 in lowerInterleavedStore()
17031 Op0, Op1, createSequentialMask(StartMask, LaneLen, 0)); in lowerInterleavedStore()
17037 ConstantInt::get(Type::getInt64Ty(STVTy->getContext()), 0)); in lowerInterleavedStore()
17047 if (StoreCount > 0) in lowerInterleavedStore()
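The fully interleaved mask quoted in the comment above (<0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>) follows one formula: element j of input lane f lands at output position j*Factor + f. A standalone generator for that shape, as a sketch rather than the in-tree helper:

  #include <cstdio>
  #include <vector>

  // For Factor vectors of LaneLen elements each, output position
  // J*Factor + F reads input element F*LaneLen + J.
  static std::vector<int> interleaveMask(int Factor, int LaneLen) {
    std::vector<int> Mask(Factor * LaneLen);
    for (int J = 0; J < LaneLen; ++J)
      for (int F = 0; F < Factor; ++F)
        Mask[J * Factor + F] = F * LaneLen + J;
    return Mask;
  }

  int main() {
    for (int M : interleaveMask(3, 4))
      printf("%d ", M); // 0 4 8 1 5 9 2 6 10 3 7 11
    printf("\n");
  }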
17066 VectorType *VTy = cast<VectorType>(DI->getType()->getContainedType(0)); in lowerDeinterleaveIntrinsicToLoad()
17100 for (unsigned I = 0; I < NumLoads; ++I) { in lowerDeinterleaveIntrinsicToLoad()
17113 VTy, Left, Builder.CreateExtractValue(LdN, 0), Idx); in lowerDeinterleaveIntrinsicToLoad()
17119 Result = Builder.CreateInsertValue(Result, Left, 0); in lowerDeinterleaveIntrinsicToLoad()
17141 VectorType *VTy = cast<VectorType>(II->getOperand(0)->getType()); in lowerInterleaveIntrinsicToStore()
17171 Value *L = II->getOperand(0); in lowerInterleaveIntrinsicToStore()
17174 for (unsigned I = 0; I < NumStores; ++I) { in lowerInterleaveIntrinsicToStore()
17182 L = Builder.CreateExtractVector(StTy, II->getOperand(0), Idx); in lowerInterleaveIntrinsicToStore()
17208 return allowsMisalignedMemoryAccesses(VT, 0, Align(1), in getOptimalMemOpType()
17238 return allowsMisalignedMemoryAccesses(VT, 0, Align(1), in getOptimalMemOpLLT()
17264 bool IsLegal = ((Immed >> 12) == 0 || in isLegalAddImmediate()
17265 ((Immed & 0xfff) == 0 && Immed >> 24 == 0)); in isLegalAddImmediate()
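The two-term test in isLegalAddImmediate above encodes the ADD/SUB immediate format: a 12-bit value, optionally shifted left by 12. Restated as a self-contained predicate (same logic as the lines shown):

  #include <cstdint>
  #include <cstdio>

  // An add/sub immediate is encodable iff it fits in the low 12 bits, or
  // is a 12-bit value shifted left by 12 (bits [23:12] only).
  static bool isLegalAddImm(uint64_t Immed) {
    return (Immed >> 12) == 0 ||
           ((Immed & 0xfff) == 0 && (Immed >> 24) == 0);
  }

  int main() {
    printf("%d\n", isLegalAddImm(0xfff));    // 1: plain 12-bit immediate
    printf("%d\n", isLegalAddImm(0x123000)); // 1: 0x123 shifted by 12
    printf("%d\n", isLegalAddImm(0x1001));   // 0: spans both halves
  }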
17280 if (Imm % 16 == 0) in isLegalAddScalableImmediate()
17291 if (Imm % 8 == 0) in isLegalAddScalableImmediate()
17294 if (Imm % 4 == 0) in isLegalAddScalableImmediate()
17297 if (Imm % 2 == 0) in isLegalAddScalableImmediate()
17364 AM.Scale = 0; in isLegalAddressingMode()
17385 (AM.ScalableOffset % VecNumBytes == 0) && VecNumBytes <= 16 && in isLegalAddressingMode()
17392 (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes); in isLegalAddressingMode()
17403 // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12 in isLegalAddressingMode()
17404 uint64_t NumBytes = 0; in isLegalAddressingMode()
17409 NumBytes = 0; in isLegalAddressingMode()
17421 int64_t HighPart = MinOffset & ~0xfffULL; in getPreferredLargeGEPBaseOffset()
17427 return 0; in getPreferredLargeGEPBaseOffset()
17478 AArch64::X16, AArch64::X17, AArch64::LR, 0 in getScratchRegisters()
17495 SDValue ShiftLHS = N->getOperand(0); in isDesirableToCommuteWithShift()
17496 EVT VT = N->getValueType(0); in isDesirableToCommuteWithShift()
17505 SDValue AndLHS = ShiftLHS.getOperand(0); in isDesirableToCommuteWithShift()
17522 (N->getOperand(0).getOpcode() == ISD::SHL || in isDesirableToCommuteXorWithShift()
17523 N->getOperand(0).getOpcode() == ISD::SRL) && in isDesirableToCommuteXorWithShift()
17528 auto *ShiftC = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1)); in isDesirableToCommuteXorWithShift()
17533 unsigned BitWidth = N->getValueType(0).getScalarSizeInBits(); in isDesirableToCommuteXorWithShift()
17534 if (N->getOperand(0).getOpcode() == ISD::SHL) in isDesirableToCommuteXorWithShift()
17536 return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt); in isDesirableToCommuteXorWithShift()
17546 N->getOperand(0).getOpcode() == ISD::SRL) || in shouldFoldConstantShiftPairToMask()
17548 N->getOperand(0).getOpcode() == ISD::SHL)) && in shouldFoldConstantShiftPairToMask()
17551 if (!N->getOperand(0)->hasOneUse()) in shouldFoldConstantShiftPairToMask()
17555 EVT VT = N->getValueType(0); in shouldFoldConstantShiftPairToMask()
17557 auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(0).getOperand(1)); in shouldFoldConstantShiftPairToMask()
17575 if (BitSize == 0) in shouldConvertConstantLoadToIntImm()
17579 if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize)) in shouldConvertConstantLoadToIntImm()
17582 if ((int64_t)Val < 0) in shouldConvertConstantLoadToIntImm()
17597 return (Index == 0 || Index == ResVT.getVectorMinNumElements()); in isExtractSubvectorCheap()
17603 /// cmge X, X, #0
17606 EVT VT = N->getValueType(0); in foldVectorXorShiftIntoCmp()
17612 SDValue Shift = N->getOperand(0); in foldVectorXorShiftIntoCmp()
17624 return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0)); in foldVectorXorShiftIntoCmp()
17645 if (N->getValueType(0) != MVT::i32) in performVecReduceAddCombineWithUADDLP()
17648 SDValue VecReduceOp0 = N->getOperand(0); in performVecReduceAddCombineWithUADDLP()
17651 if (Opcode != ISD::ABS || VecReduceOp0->getValueType(0) != MVT::v16i32) in performVecReduceAddCombineWithUADDLP()
17656 if (ABS->getOperand(0)->getOpcode() != ISD::SUB || in performVecReduceAddCombineWithUADDLP()
17657 ABS->getOperand(0)->getValueType(0) != MVT::v16i32) in performVecReduceAddCombineWithUADDLP()
17660 SDValue SUB = ABS->getOperand(0); in performVecReduceAddCombineWithUADDLP()
17661 unsigned Opcode0 = SUB->getOperand(0).getOpcode(); in performVecReduceAddCombineWithUADDLP()
17664 if (SUB->getOperand(0)->getValueType(0) != MVT::v16i32 || in performVecReduceAddCombineWithUADDLP()
17665 SUB->getOperand(1)->getValueType(0) != MVT::v16i32) in performVecReduceAddCombineWithUADDLP()
17677 SDValue EXT0 = SUB->getOperand(0); in performVecReduceAddCombineWithUADDLP()
17680 if (EXT0->getOperand(0)->getValueType(0) != MVT::v16i8 || in performVecReduceAddCombineWithUADDLP()
17681 EXT1->getOperand(0)->getValueType(0) != MVT::v16i8) in performVecReduceAddCombineWithUADDLP()
17689 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0), in performVecReduceAddCombineWithUADDLP()
17692 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0), in performVecReduceAddCombineWithUADDLP()
17700 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT0->getOperand(0), in performVecReduceAddCombineWithUADDLP()
17701 DAG.getConstant(0, DL, MVT::i64)); in performVecReduceAddCombineWithUADDLP()
17703 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, EXT1->getOperand(0), in performVecReduceAddCombineWithUADDLP()
17704 DAG.getConstant(0, DL, MVT::i64)); in performVecReduceAddCombineWithUADDLP()
17731 SDValue Op0 = N->getOperand(0); in performVecReduceAddCombine()
17732 if (N->getValueType(0) != MVT::i32 || Op0.getValueType().isScalableVT() || in performVecReduceAddCombine()
17740 A = Op0.getOperand(0); in performVecReduceAddCombine()
17743 A.getOperand(0).getValueType() != B.getOperand(0).getValueType()) in performVecReduceAddCombine()
17750 EVT Op0VT = A.getOperand(0).getValueType(); in performVecReduceAddCombine()
17751 bool IsValidElementCount = Op0VT.getVectorNumElements() % 8 == 0; in performVecReduceAddCombine()
17762 B = B.getOperand(0); in performVecReduceAddCombine()
17764 unsigned IsMultipleOf16 = Op0VT.getVectorNumElements() % 16 == 0; in performVecReduceAddCombine()
17778 SDValue Zeros = DAG.getConstant(0, DL, TargetType); in performVecReduceAddCombine()
17780 A.getOperand(0), B); in performVecReduceAddCombine()
17781 return DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot); in performVecReduceAddCombine()
17786 unsigned I = 0; in performVecReduceAddCombine()
17788 SDValue Zeros = DAG.getConstant(0, DL, MVT::v4i32); in performVecReduceAddCombine()
17790 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v16i8, A.getOperand(0), in performVecReduceAddCombine()
17804 DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), ConcatSDot16); in performVecReduceAddCombine()
17806 if (VecReduce8Num == 0) in performVecReduceAddCombine()
17811 SDValue Zeros = DAG.getConstant(0, DL, MVT::v2i32); in performVecReduceAddCombine()
17813 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, A.getOperand(0), in performVecReduceAddCombine()
17820 DAG.getNode(ISD::VECREDUCE_ADD, DL, N->getValueType(0), Dot); in performVecReduceAddCombine()
17821 return DAG.getNode(ISD::ADD, DL, N->getValueType(0), VecReduceAdd16, in performVecReduceAddCombine()
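The combine above splits a widened-multiply reduction into dot-product nodes (the Dot/SDot16 values). As I understand the underlying SDOT/UDOT semantics, each 32-bit lane accumulates the products of four adjacent 8-bit lanes; a scalar model of that behaviour, offered as an illustration only:

  #include <cstdint>
  #include <cstdio>

  // Scalar model of UDOT: v16i8 x v16i8 accumulated into v4i32, four
  // byte-products per 32-bit lane.
  static void udot(uint32_t Acc[4], const uint8_t A[16], const uint8_t B[16]) {
    for (int Lane = 0; Lane < 4; ++Lane)
      for (int J = 0; J < 4; ++J)
        Acc[Lane] += uint32_t(A[Lane * 4 + J]) * uint32_t(B[Lane * 4 + J]);
  }

  int main() {
    uint8_t A[16], B[16];
    for (int I = 0; I < 16; ++I) { A[I] = uint8_t(I); B[I] = 1; }
    uint32_t Acc[4] = {0, 0, 0, 0};
    udot(Acc, A, B);
    printf("%u\n", Acc[0] + Acc[1] + Acc[2] + Acc[3]); // 120 == 0+1+...+15
  }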
17835 SDValue Op0 = A.getOperand(0); in performUADDVAddCombine()
17841 SDValue Ext0 = Op0.getOperand(0); in performUADDVAddCombine()
17842 SDValue Ext1 = Op1.getOperand(0); in performUADDVAddCombine()
17845 Ext0.getOperand(0) != Ext1.getOperand(0)) in performUADDVAddCombine()
17849 if (Ext0.getOperand(0).getValueType().getVectorNumElements() != in performUADDVAddCombine()
17852 if ((Ext0.getConstantOperandVal(1) != 0 || in performUADDVAddCombine()
17854 (Ext1.getConstantOperandVal(1) != 0 || in performUADDVAddCombine()
17859 return DAG.getNode(Opcode, SDLoc(A), VT, Ext0.getOperand(0)); in performUADDVAddCombine()
17865 if (A.getOperand(0).getOpcode() == ISD::ADD && A.getOperand(0).hasOneUse()) in performUADDVAddCombine()
17866 if (SDValue R = performUADDVAddCombine(A.getOperand(0), DAG)) in performUADDVAddCombine()
17872 A.getOperand(0)); in performUADDVAddCombine()
17885 SDValue Op0 = A.getOperand(0); in performUADDVZextCombine()
17889 SDValue Ext0 = Op0.getOperand(0); in performUADDVZextCombine()
17890 SDValue Ext1 = Op1.getOperand(0); in performUADDVZextCombine()
17917 SDValue A = N->getOperand(0); in performUADDVCombine()
17920 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), R); in performUADDVCombine()
17941 if (isIntDivCheap(N->getValueType(0), Attr)) in BuildSDIVPow2()
17942 return SDValue(N, 0); // Lower SDIV as SDIV in BuildSDIVPow2()
17944 EVT VT = N->getValueType(0); in BuildSDIVPow2()
17950 return SDValue(N, 0); in BuildSDIVPow2()
17958 // (N->getValueType(0) >> (BitWidth - 1)) to it before shifting right. in BuildSDIVPow2()
17971 if (isIntDivCheap(N->getValueType(0), Attr)) in BuildSREMPow2()
17972 return SDValue(N, 0); // Lower SREM as SREM in BuildSREMPow2()
17974 EVT VT = N->getValueType(0); in BuildSREMPow2()
17979 return SDValue(N, 0); in BuildSREMPow2()
17987 if (Lg2 == 0) in BuildSREMPow2()
17991 SDValue N0 = N->getOperand(0); in BuildSREMPow2()
17993 SDValue Zero = DAG.getConstant(0, DL, VT); in BuildSREMPow2()
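The comment inside BuildSDIVPow2 above alludes to the standard rounding fix for signed division by 2^k: derive 2^k - 1 from the sign bit and add it before the arithmetic shift, so negative dividends truncate toward zero. A standalone sketch of that identity (not the DAG lowering itself; assumes arithmetic right shift on signed values):

  #include <cstdint>
  #include <cstdio>

  // x / 2^K with truncation toward zero; valid for 1 <= K <= 62 here so
  // both shifts stay in range.
  static int64_t sdivPow2(int64_t X, unsigned K) {
    uint64_t SignMask = uint64_t(X >> 63);              // all ones iff X < 0
    int64_t Biased = X + int64_t(SignMask >> (64 - K)); // + (2^K - 1) if X < 0
    return Biased >> K;
  }

  int main() {
    printf("%lld\n", (long long)sdivPow2(-7, 2)); // -1, matches C's -7 / 4
    printf("%lld\n", (long long)sdivPow2(7, 2));  //  1
  }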
18051 return Extend.getOperand(0).getValueType(); in calculatePreExtendType()
18093 SDValue Extend = BV->getOperand(0); in performBuildShuffleExtendCombine()
18133 : DAG.getAnyExtOrTrunc(Op.getOperand(0), DL, in performBuildShuffleExtendCombine()
18138 NBV = DAG.getVectorShuffle(PreExtendVT, DL, BV.getOperand(0).getOperand(0), in performBuildShuffleExtendCombine()
18141 : BV.getOperand(1).getOperand(0), in performBuildShuffleExtendCombine()
18151 EVT VT = Mul->getValueType(0); in performMulVectorExtendCombine()
18155 SDValue Op0 = performBuildShuffleExtendCombine(Mul->getOperand(0), DAG); in performMulVectorExtendCombine()
18163 return DAG.getNode(Mul->getOpcode(), DL, VT, Op0 ? Op0 : Mul->getOperand(0), in performMulVectorExtendCombine()
18167 // Combine v4i32 Mul(And(Srl(X, 15), 0x10001), 0xffff) -> v8i16 CMLTz
18170 EVT VT = N->getValueType(0); in performMulVectorCmpZeroCombine()
18174 if (N->getOperand(0).getOpcode() != ISD::AND || in performMulVectorCmpZeroCombine()
18175 N->getOperand(0).getOperand(0).getOpcode() != ISD::SRL) in performMulVectorCmpZeroCombine()
18178 SDValue And = N->getOperand(0); in performMulVectorCmpZeroCombine()
18179 SDValue Srl = And.getOperand(0); in performMulVectorCmpZeroCombine()
18197 SDValue In = DAG.getNode(AArch64ISD::NVCAST, DL, HalfVT, Srl.getOperand(0)); in performMulVectorCmpZeroCombine()
18207 EVT VT = N->getValueType(0); in performVectorExtCombine()
18209 (N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND && in performVectorExtCombine()
18210 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND) || in performVectorExtCombine()
18213 N->getOperand(0).getOperand(0).getValueType() != in performVectorExtCombine()
18214 N->getOperand(1).getOperand(0).getValueType()) in performVectorExtCombine()
18218 N->getOperand(0).getOpcode() != N->getOperand(1).getOpcode()) in performVectorExtCombine()
18221 SDValue N0 = N->getOperand(0).getOperand(0); in performVectorExtCombine()
18222 SDValue N1 = N->getOperand(1).getOperand(0); in performVectorExtCombine()
18233 SDValue NewN0 = DAG.getNode(N->getOperand(0).getOpcode(), DL, HalfVT, N0); in performVectorExtCombine()
18236 return DAG.getNode(N->getOpcode() == ISD::MUL ? N->getOperand(0).getOpcode() in performVectorExtCombine()
18261 EVT VT = N->getValueType(0); in performMulCombine()
18262 SDValue N0 = N->getOperand(0); in performMulCombine()
18271 MulOper = V->getOperand(0); in performMulCombine()
18301 (IsSVECntIntrinsic(N0->getOperand(0))))) in performMulCombine()
18357 SDValue Zero = DAG.getConstant(0, DL, VT); in performMulCombine()
18371 if (Rem == 0 && NVMinus1.isPowerOf2()) { in performMulCombine()
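The names in the line above (Rem, NVMinus1.isPowerOf2()) suggest the mul combine is testing whether the constant factors as (2^M + 1) shifted by N, a shape that lowers to shift-and-add; that reading is an assumption on my part. The arithmetic identity itself is easy to check:

  #include <cstdint>
  #include <cstdio>

  // If C == (2^M + 1) << N then x * C == ((x << M) + x) << N: one shl,
  // one add, one shl instead of a multiply.
  static uint64_t mulViaShiftAdd(uint64_t X, unsigned M, unsigned N) {
    return ((X << M) + X) << N;
  }

  int main() {
    // C = (2^3 + 1) << 2 == 36
    printf("%llu\n", (unsigned long long)mulViaShiftAdd(5, 3, 2)); // 180
    printf("%llu\n", (unsigned long long)(5 * 36));                // 180
  }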
18495 // Take advantage of vector comparisons producing 0 or -1 in each lane to in performVectorCompareAndMaskUnaryOpCombine()
18506 EVT VT = N->getValueType(0); in performVectorCompareAndMaskUnaryOpCombine()
18507 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND || in performVectorCompareAndMaskUnaryOpCombine()
18508 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC || in performVectorCompareAndMaskUnaryOpCombine()
18509 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits()) in performVectorCompareAndMaskUnaryOpCombine()
18517 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) { in performVectorCompareAndMaskUnaryOpCombine()
18524 EVT IntVT = BV->getValueType(0); in performVectorCompareAndMaskUnaryOpCombine()
18527 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0)); in performVectorCompareAndMaskUnaryOpCombine()
18531 N->getOperand(0)->getOperand(0), MaskConst); in performVectorCompareAndMaskUnaryOpCombine()
18546 EVT VT = N->getValueType(0); in performIntToFpCombine()
18551 if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits()) in performIntToFpCombine()
18557 SDValue N0 = N->getOperand(0); in performIntToFpCombine()
18587 if (!N->getValueType(0).isSimple()) in performFpToIntCombine()
18590 SDValue Op = N->getOperand(0); in performFpToIntCombine()
18607 MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); in performFpToIntCombine()
18620 if (C == -1 || C == 0 || C > Bits) in performFpToIntCombine()
18642 Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32)); in performFpToIntCombine()
18645 FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv); in performFpToIntCombine()
18652 EVT VT = N->getValueType(0); in tryCombineToBSL()
18667 SDValue N0 = N->getOperand(0); in tryCombineToBSL()
18678 for (int i = 1; i >= 0; --i) { in tryCombineToBSL()
18679 for (int j = 1; j >= 0; --j) { in tryCombineToBSL()
18698 if (!ISD::isConstantSplatVectorAllZeros(Sub.getOperand(0).getNode())) in tryCombineToBSL()
18705 if (Sub.getOperand(1) != Add.getOperand(0)) in tryCombineToBSL()
18717 for (int i = 1; i >= 0; --i) in tryCombineToBSL()
18718 for (int j = 1; j >= 0; --j) { in tryCombineToBSL()
18733 for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) { in tryCombineToBSL()
18750 // Given a tree of and/or(csel(0, 1, cc0), csel(0, 1, cc1)), we may be able to
18761 EVT VT = N->getValueType(0); in performANDORCSELCombine()
18762 SDValue CSel0 = N->getOperand(0); in performANDORCSELCombine()
18772 if (!isNullConstant(CSel0.getOperand(0)) || in performANDORCSELCombine()
18774 !isNullConstant(CSel1.getOperand(0)) || in performANDORCSELCombine()
18812 // CCMP accepts the constant in the range [0, 31] in performANDORCSELCombine()
18816 DAG.getConstant(Op1->getAPIntValue().abs(), DL, Op1->getValueType(0)); in performANDORCSELCombine()
18817 CCmp = DAG.getNode(AArch64ISD::CCMN, DL, MVT_CC, Cmp1.getOperand(0), AbsOp1, in performANDORCSELCombine()
18820 CCmp = DAG.getNode(AArch64ISD::CCMP, DL, MVT_CC, Cmp1.getOperand(0), in performANDORCSELCombine()
18823 return DAG.getNode(AArch64ISD::CSEL, DL, VT, CSel0.getOperand(0), in performANDORCSELCombine()
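The abs() taken on the constant above pairs with the [0, 31] range noted in the comment: a negative compare constant becomes a CCMN of its absolute value, since x - (-c) and x + c set the same flags. A sketch of that selection rule with hypothetical names (the enum and helper below are not from the source):

  #include <cstdint>
  #include <cstdio>

  enum CondCmp { UseCCMP, UseCCMN, NotEncodable };

  // Conditional-compare immediates are unsigned 5-bit values, so constants
  // in [0, 31] use CCMP directly and [-31, -1] flip to CCMN.
  static CondCmp pickCondCompare(int64_t C, uint64_t &Imm5) {
    if (C >= 0 && C <= 31) { Imm5 = uint64_t(C); return UseCCMP; }
    if (C >= -31 && C < 0) { Imm5 = uint64_t(-C); return UseCCMN; }
    return NotEncodable;
  }

  int main() {
    uint64_t Imm = 0;
    CondCmp Kind = pickCondCompare(-7, Imm);
    printf("kind=%d imm=%llu\n", (int)Kind, (unsigned long long)Imm); // 1, 7
  }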
18832 EVT VT = N->getValueType(0); in performORCombine()
18850 uint64_t MaskForTy = 0ull; in isConstantSplatVectorMaskForType()
18853 MaskForTy = 0xffull; in isConstantSplatVectorMaskForType()
18856 MaskForTy = 0xffffull; in isConstantSplatVectorMaskForType()
18859 MaskForTy = 0xffffffffull; in isConstantSplatVectorMaskForType()
18867 if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) in isConstantSplatVectorMaskForType()
18874 SDValue LeafOp = SDValue(N, 0); in performReinterpretCastCombine()
18875 SDValue Op = N->getOperand(0); in performReinterpretCastCombine()
18878 Op = Op->getOperand(0); in performReinterpretCastCombine()
18887 SDValue Src = N->getOperand(0); in performSVEAndCombine()
18892 SDValue UnpkOp = Src->getOperand(0); in performSVEAndCombine()
18899 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Dup->getOperand(0)); in performSVEAndCombine()
18906 return ((ExtVal == 0xFF && VT == MVT::i8) || in performSVEAndCombine()
18907 (ExtVal == 0xFFFF && VT == MVT::i16) || in performSVEAndCombine()
18908 (ExtVal == 0xFFFFFFFF && VT == MVT::i32)); in performSVEAndCombine()
18913 EVT EltTy = UnpkOp->getValueType(0).getVectorElementType(); in performSVEAndCombine()
18932 Dup = DAG.getNode(ISD::SPLAT_VECTOR, DL, UnpkOp->getValueType(0), in performSVEAndCombine()
18936 UnpkOp->getValueType(0), UnpkOp, Dup); in performSVEAndCombine()
18938 return DAG.getNode(Opc, DL, N->getValueType(0), And); in performSVEAndCombine()
18946 if (isAllActivePredicate(DAG, N->getOperand(0))) in performSVEAndCombine()
18949 return N->getOperand(0); in performSVEAndCombine()
19003 SDValue SetCC = N->getOperand(0); in performANDSETCCCombine()
19004 EVT VT = N->getValueType(0); in performANDSETCCCombine()
19016 SetCC.getOperand(0).getValueType() == MVT::f32) { in performANDSETCCCombine()
19023 (Cmp = emitConjunction(DAG, SDValue(N, 0), CC))) { in performANDSETCCCombine()
19028 return DAG.getNode(AArch64ISD::CSINC, DL, VT, DAG.getConstant(0, DL, VT), in performANDSETCCCombine()
19029 DAG.getConstant(0, DL, VT), in performANDSETCCCombine()
19039 SDValue LHS = N->getOperand(0); in performANDCombine()
19041 EVT VT = N->getValueType(0); in performANDCombine()
19068 APInt DefBits(VT.getSizeInBits(), 0); in performANDCombine()
19069 APInt UndefBits(VT.getSizeInBits(), 0); in performANDCombine()
19073 // Any bits known to already be 0 need not be cleared again, which can help in performANDCombine()
19076 APInt ZeroSplat(VT.getSizeInBits(), 0); in performANDCombine()
19077 for (unsigned I = 0; I < VT.getSizeInBits() / Known.Zero.getBitWidth(); I++) in performANDCombine()
19082 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG, in performANDCombine()
19084 (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG, in performANDCombine()
19089 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, SDValue(N, 0), DAG, in performANDCombine()
19091 (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, SDValue(N, 0), DAG, in performANDCombine()
19102 SDValue LHS = N->getOperand(0); in performFADDCombine()
19104 EVT VT = N->getValueType(0); in performFADDCombine()
19114 unsigned Opc = A.getConstantOperandVal(0); in performFADDCombine()
19121 ISD::INTRINSIC_WO_CHAIN, DL, VT, A.getOperand(0), in performFADDCombine()
19153 (N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilege || in isPredicateCCSettingOp()
19154 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilegt || in isPredicateCCSettingOp()
19155 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehi || in isPredicateCCSettingOp()
19156 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilehs || in isPredicateCCSettingOp()
19157 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilele || in isPredicateCCSettingOp()
19158 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelo || in isPredicateCCSettingOp()
19159 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilels || in isPredicateCCSettingOp()
19160 N.getConstantOperandVal(0) == Intrinsic::aarch64_sve_whilelt || in isPredicateCCSettingOp()
19162 N.getConstantOperandVal(0) == Intrinsic::get_active_lane_mask))) in isPredicateCCSettingOp()
19168 // Materialize : i1 = extract_vector_elt t37, Constant:i64<0>
19179 SDValue N0 = N->getOperand(0); in performFirstTrueTestVectorCombine()
19191 // Extracts of lane 0 for SVE can be expressed as PTEST(Op, FIRST) ? 1 : 0 in performFirstTrueTestVectorCombine()
19194 return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::FIRST_ACTIVE); in performFirstTrueTestVectorCombine()
19209 SDValue N0 = N->getOperand(0); in performLastTrueTestVectorCombine()
19220 SDValue VS = Idx.getOperand(0); in performLastTrueTestVectorCombine()
19225 if (VS.getConstantOperandVal(0) != NumEls) in performLastTrueTestVectorCombine()
19228 // Extracts of lane EC-1 for SVE can be expressed as PTEST(Op, LAST) ? 1 : 0 in performLastTrueTestVectorCombine()
19231 return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE); in performLastTrueTestVectorCombine()
19244 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); in performExtractVectorEltCombine()
19246 EVT VT = N->getValueType(0); in performExtractVectorEltCombine()
19252 return VT.isInteger() ? DAG.getZExtOrTrunc(N0.getOperand(0), SDLoc(N), VT) in performExtractVectorEltCombine()
19253 : N0.getOperand(0); in performExtractVectorEltCombine()
19258 // (vector_shuffle (vXf32 Other) undef <1,X,...> )) 0)) in performExtractVectorEltCombine()
19260 // (f32 (fadd (extract_vector_elt (vXf32 Other) 0) in performExtractVectorEltCombine()
19267 SDValue N00 = N0->getOperand(IsStrict ? 1 : 0); in performExtractVectorEltCombine()
19279 if (Shuffle && Shuffle->getMaskElt(0) == 1 && in performExtractVectorEltCombine()
19280 Other == Shuffle->getOperand(0)) { in performExtractVectorEltCombine()
19282 DAG.getConstant(0, DL, MVT::i64)); in performExtractVectorEltCombine()
19294 {N0->getOperand(0), Extract1, Extract2}); in performExtractVectorEltCombine()
19295 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Ret); in performExtractVectorEltCombine()
19297 return SDValue(N, 0); in performExtractVectorEltCombine()
19308 EVT VT = N->getValueType(0); in performConcatVectorsCombine()
19309 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); in performConcatVectorsCombine()
19322 // <0, 2, 4, 6>))) in performConcatVectorsCombine()
19328 SDValue N00 = N0->getOperand(0); in performConcatVectorsCombine()
19329 SDValue N10 = N1->getOperand(0); in performConcatVectorsCombine()
19337 for (size_t i = 0; i < Mask.size(); ++i) in performConcatVectorsCombine()
19347 if (N->getOperand(0).getValueType() == MVT::v4i8 || in performConcatVectorsCombine()
19348 N->getOperand(0).getValueType() == MVT::v2i16 || in performConcatVectorsCombine()
19349 N->getOperand(0).getValueType() == MVT::v2i8) { in performConcatVectorsCombine()
19350 EVT SrcVT = N->getOperand(0).getValueType(); in performConcatVectorsCombine()
19354 if (N->getNumOperands() % 2 == 0 && in performConcatVectorsCombine()
19368 for (unsigned i = 0; i < N->getNumOperands(); i++) { in performConcatVectorsCombine()
19380 return DAG.getBitcast(N->getValueType(0), in performConcatVectorsCombine()
19400 SDValue N00 = N0->getOperand(0); in performConcatVectorsCombine()
19401 SDValue N10 = N1->getOperand(0); in performConcatVectorsCombine()
19408 N00->getOperand(0)), in performConcatVectorsCombine()
19410 N10->getOperand(0))), in performConcatVectorsCombine()
19426 SDValue N00 = N0->getOperand(0); in performConcatVectorsCombine()
19428 SDValue N10 = N1->getOperand(0); in performConcatVectorsCombine()
19441 SDValue Op = Shr.getOperand(0); in performConcatVectorsCombine()
19450 Op.getOperand(1).getConstantOperandVal(0) in performConcatVectorsCombine()
19453 isa<ConstantSDNode>(Op.getOperand(1).getOperand(0))) in performConcatVectorsCombine()
19455 Op.getOperand(1).getConstantOperandVal(0)); in performConcatVectorsCombine()
19469 SDValue X = N0.getOperand(0).getOperand(0); in performConcatVectorsCombine()
19471 : N1.getOperand(0).getOperand(0); in performConcatVectorsCombine()
19485 N1Opc == AArch64ISD::ZIP2 && N0.getOperand(0) == N1.getOperand(0) && in performConcatVectorsCombine()
19487 SDValue E0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, N0.getOperand(0), in performConcatVectorsCombine()
19500 DAG.getConstant(0, dl, MVT::i64)); in performConcatVectorsCombine()
19514 SDValue RHS = N1->getOperand(0); in performConcatVectorsCombine()
19537 EVT VT = N->getValueType(0); in performExtractSubvectorCombine()
19541 SDValue V = N->getOperand(0); in performExtractSubvectorCombine()
19548 if (isa<ConstantSDNode>(V.getOperand(0))) in performExtractSubvectorCombine()
19549 return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, V.getOperand(0)); in performExtractSubvectorCombine()
19558 SDValue Vec = N->getOperand(0); in performInsertSubvectorCombine()
19571 if (IdxVal == 0 && Vec.isUndef()) in performInsertSubvectorCombine()
19577 (IdxVal != 0 && IdxVal != NumSubElts)) in performInsertSubvectorCombine()
19584 if (IdxVal == 0) { in performInsertSubvectorCombine()
19590 DAG.getVectorIdxConstant(0, DL)); in performInsertSubvectorCombine()
19618 SDValue IID = N->getOperand(0); in tryCombineFixedPointConvert()
19620 SDValue Vec = Op1.getOperand(0); in tryCombineFixedPointConvert()
19622 EVT ResTy = N->getValueType(0); in tryCombineFixedPointConvert()
19662 N.getConstantOperandVal(1) == 0) in tryExtendDUPToExtractHigh()
19663 N = N.getOperand(0); in tryExtendDUPToExtractHigh()
19702 N = N.getOperand(0); in isEssentiallyExtractHighSubvector()
19705 if (N.getOperand(0).getValueType().isScalableVector()) in isEssentiallyExtractHighSubvector()
19708 N.getOperand(0).getValueType().getVectorNumElements() / 2; in isEssentiallyExtractHighSubvector()
19747 SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0); in isSetCC()
19755 // - csel 1, 0, cc in isSetCC()
19756 // - csel 0, 1, !cc in isSetCC()
19768 // (2) One must be 1 and the other must be 0. in isSetCC()
19769 ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0)); in isSetCC()
19791 isSetCC(Op->getOperand(0), Info)); in isSetCCOrZExtSetCC()
19802 SDValue LHS = Op->getOperand(0); in performSetccAddFolding()
19822 ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType() in performSetccAddFolding()
19841 EVT VT = Op->getValueType(0); in performSetccAddFolding()
19848 EVT VT = N->getValueType(0); in performAddUADDVCombine()
19853 SDValue LHS = N->getOperand(0); in performAddUADDVCombine()
19864 SDValue Op1 = LHS->getOperand(0); in performAddUADDVCombine()
19865 SDValue Op2 = RHS->getOperand(0); in performAddUADDVCombine()
19873 SDValue Val1 = Op1.getOperand(0); in performAddUADDVCombine()
19874 SDValue Val2 = Op2.getOperand(0); in performAddUADDVCombine()
19875 EVT ValVT = Val1->getValueType(0); in performAddUADDVCombine()
19880 DAG.getConstant(0, DL, MVT::i64)); in performAddUADDVCombine()
19887 EVT VT = N->getValueType(0); in performAddCSelIntoCSinc()
19891 SDValue LHS = N->getOperand(0); in performAddCSelIntoCSinc()
19912 ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(LHS.getOperand(0)); in performAddCSelIntoCSinc()
19951 SDValue NewNode = DAG.getNode(ISD::ADD, DL, VT, RHS, SDValue(CTVal, 0)); in performAddCSelIntoCSinc()
19960 EVT VT = N->getValueType(0); in performAddDotCombine()
19964 SDValue Dot = N->getOperand(0); in performAddDotCombine()
19970 isZerosVector(Dot.getOperand(0).getNode()); in performAddDotCombine()
19982 return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)); in isNegatedInteger()
19988 SDValue Zero = DAG.getConstant(0, DL, VT); in getNegatedInteger()
20000 if (!isNegatedInteger(SDValue(N, 0))) in performNegCSelCombine()
20007 SDValue N0 = CSel.getOperand(0); in performNegCSelCombine()
20041 MVT VT = N->getSimpleValueType(0); in performAddSubLongCombine()
20049 SDValue LHS = N->getOperand(0); in performAddSubLongCombine()
20060 if (isEssentiallyExtractHighSubvector(LHS.getOperand(0))) { in performAddSubLongCombine()
20061 RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG); in performAddSubLongCombine()
20066 } else if (isEssentiallyExtractHighSubvector(RHS.getOperand(0))) { in performAddSubLongCombine()
20067 LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG); in performAddSubLongCombine()
20079 !Op.getNode()->hasAnyUseOfValue(0); in isCMP()
20082 // (CSEL 1 0 CC Cond) => CC
20083 // (CSEL 0 1 CC Cond) => !CC
20090 SDValue OpLHS = Op.getOperand(0); in getCSETCondCode()
20101 // (SBC{S} l r (CMP 0 (CSET LO carry))) => (SBC{S} l r carry)
20111 if (!isNullConstant(CmpOp.getOperand(0))) in foldOverflowCheck()
20115 SDValue CsetOp = CmpOp->getOperand(IsAdd ? 0 : 1); in foldOverflowCheck()
20121 Op->getOperand(0), Op->getOperand(1), in foldOverflowCheck()
20125 // (ADC x 0 cond) => (CINC x HS cond)
20127 SDValue LHS = N->getOperand(0); in foldADCToCINC()
20134 EVT VT = N->getValueType(0); in foldADCToCINC()
20146 EVT VT = N->getValueType(0); in performBuildVectorCombine()
20150 SDValue Elt0 = N->getOperand(0), Elt1 = N->getOperand(1), in performBuildVectorCombine()
20157 Elt0->getOperand(0)->getOpcode() == ISD::EXTRACT_VECTOR_ELT && in performBuildVectorCombine()
20158 Elt1->getOperand(0)->getOpcode() == ISD::EXTRACT_VECTOR_ELT && in performBuildVectorCombine()
20160 isa<ConstantSDNode>(Elt0->getOperand(0)->getOperand(1)) && in performBuildVectorCombine()
20161 isa<ConstantSDNode>(Elt1->getOperand(0)->getOperand(1)) && in performBuildVectorCombine()
20162 Elt0->getOperand(0)->getOperand(0) == in performBuildVectorCombine()
20163 Elt1->getOperand(0)->getOperand(0) && in performBuildVectorCombine()
20164 Elt0->getOperand(0)->getConstantOperandVal(1) == 0 && in performBuildVectorCombine()
20165 Elt1->getOperand(0)->getConstantOperandVal(1) == 1) { in performBuildVectorCombine()
20166 SDValue LowLanesSrcVec = Elt0->getOperand(0)->getOperand(0); in performBuildVectorCombine()
20178 Elt2->getOperand(0)->getOpcode() == in performBuildVectorCombine()
20180 Elt3->getOperand(0)->getOpcode() == in performBuildVectorCombine()
20183 isa<ConstantSDNode>(Elt2->getOperand(0)->getOperand(1)) && in performBuildVectorCombine()
20184 isa<ConstantSDNode>(Elt3->getOperand(0)->getOperand(1)) && in performBuildVectorCombine()
20185 Elt2->getOperand(0)->getOperand(0) == in performBuildVectorCombine()
20186 Elt3->getOperand(0)->getOperand(0) && in performBuildVectorCombine()
20187 Elt2->getOperand(0)->getConstantOperandVal(1) == 0 && in performBuildVectorCombine()
20188 Elt3->getOperand(0)->getConstantOperandVal(1) == 1) { in performBuildVectorCombine()
20189 SDValue HighLanesSrcVec = Elt2->getOperand(0)->getOperand(0); in performBuildVectorCombine()
20206 SDValue Elt0 = N->getOperand(0), Elt1 = N->getOperand(1); in performBuildVectorCombine()
20209 Elt0->getOperand(0)->getOpcode() == ISD::EXTRACT_VECTOR_ELT && in performBuildVectorCombine()
20210 Elt1->getOperand(0)->getOpcode() == ISD::EXTRACT_VECTOR_ELT && in performBuildVectorCombine()
20211 Elt0->getOperand(0)->getOperand(0) == in performBuildVectorCombine()
20212 Elt1->getOperand(0)->getOperand(0) && in performBuildVectorCombine()
20214 isa<ConstantSDNode>(Elt0->getOperand(0)->getOperand(1)) && in performBuildVectorCombine()
20215 isa<ConstantSDNode>(Elt1->getOperand(0)->getOperand(1)) && in performBuildVectorCombine()
20216 Elt0->getOperand(0)->getConstantOperandVal(1) + 1 == in performBuildVectorCombine()
20217 Elt1->getOperand(0)->getConstantOperandVal(1) && in performBuildVectorCombine()
20220 Elt0->getOperand(0)->getConstantOperandVal(1) % in performBuildVectorCombine()
20222 0) { in performBuildVectorCombine()
20223 SDValue SrcVec = Elt0->getOperand(0)->getOperand(0); in performBuildVectorCombine()
20228 SDValue SubvectorIdx = Elt0->getOperand(0)->getOperand(1); in performBuildVectorCombine()
20240 // (build_vector (extract_elt_iXX_to_i32 vec Idx+0) in performBuildVectorCombine()
20249 SDValue Elt0 = N->getOperand(0), Elt1 = N->getOperand(1); in performBuildVectorCombine()
20257 Elt0->getOperand(0) == Elt1->getOperand(0) && in performBuildVectorCombine()
20262 Elt0->getConstantOperandVal(1) % VT.getVectorMinNumElements() == 0) { in performBuildVectorCombine()
20263 SDValue VecToExtend = Elt0->getOperand(0); in performBuildVectorCombine()
20280 EVT VT = N->getValueType(0); in performTruncateCombine()
20281 SDValue N0 = N->getOperand(0); in performTruncateCombine()
20284 SDValue Op = N0.getOperand(0); in performTruncateCombine()
20286 N0.getOperand(0).getValueType().getScalarType() == MVT::i64) in performTruncateCombine()
20302 SrcVT = N.getOperand(0).getValueType(); in isExtendOrShiftOperand()
20310 return AndMask == 0xff || AndMask == 0xffff || AndMask == 0xffffffff; in isExtendOrShiftOperand()
20336 SDValue Shift = SUB.getOperand(0); in performAddCombineSubShift()
20341 EVT VT = N->getValueType(0); in performAddCombineSubShift()
20357 EVT VT = N->getValueType(0); in performAddCombineForShiftedOperands()
20362 SDValue LHS = N->getOperand(0); in performAddCombineForShiftedOperands()
20370 uint64_t LHSImm = 0, RHSImm = 0; in performAddCombineForShiftedOperands()
20394 SDValue X = N->getOperand(0); in performSubAddMULCombine()
20403 SDValue M1 = Add.getOperand(0); in performSubAddMULCombine()
20412 EVT VT = N->getValueType(0); in performSubAddMULCombine()
20437 if (!N->getValueType(0).isFixedLengthVector()) in performSVEMulAddSubCombine()
20447 SDValue MulValue = Op1->getOperand(0); in performSVEMulAddSubCombine()
20461 return convertFromScalableVector(DAG, N->getValueType(0), NewValue); in performSVEMulAddSubCombine()
20464 if (SDValue res = performOpt(N->getOperand(0), N->getOperand(1))) in performSVEMulAddSubCombine()
20467 return performOpt(N->getOperand(1), N->getOperand(0)); in performSVEMulAddSubCombine()
20475 EVT VT = N->getValueType(0); in performAddSubIntoVectorOp()
20479 SDValue Op0 = N->getOperand(0); in performAddSubIntoVectorOp()
20493 Op0.getOperand(0).getValueType() == MVT::v1i64) { in performAddSubIntoVectorOp()
20494 Op0 = Op0.getOperand(0); in performAddSubIntoVectorOp()
20497 Op1.getOperand(0).getValueType() == MVT::v1i64) { in performAddSubIntoVectorOp()
20499 Op1 = Op1.getOperand(0); in performAddSubIntoVectorOp()
20505 DAG.getConstant(0, DL, MVT::i64)); in performAddSubIntoVectorOp()
20519 for (unsigned Op = 0; Op < BV.getNumOperands(); Op++) { in isLoadOrMultipleLoads()
20531 // t46: v16i8 = vector_shuffle<0,1,2,3,4,5,6,7,8,9,10,11,16,17,18,19> t44, t45 in isLoadOrMultipleLoads()
20532 // t44: v16i8 = vector_shuffle<0,1,2,3,4,5,6,7,16,17,18,19,u,u,u,u> t42, t43 in isLoadOrMultipleLoads()
20539 // t28: v4i8,ch = load<(load (s32) from %ir.0)> t0, t2, undef:i64 in isLoadOrMultipleLoads()
20540 if (B.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE || in isLoadOrMultipleLoads()
20541 B.getOperand(0).getOperand(0).getOpcode() != ISD::CONCAT_VECTORS || in isLoadOrMultipleLoads()
20542 B.getOperand(0).getOperand(1).getOpcode() != ISD::CONCAT_VECTORS || in isLoadOrMultipleLoads()
20547 auto SV2 = cast<ShuffleVectorSDNode>(B.getOperand(0)); in isLoadOrMultipleLoads()
20550 for (int I = 0; I < NumSubElts; I++) { in isLoadOrMultipleLoads()
20551 // <0,1,2,3,4,5,6,7,8,9,10,11,16,17,18,19> in isLoadOrMultipleLoads()
20557 // <0,1,2,3,4,5,6,7,16,17,18,19,u,u,u,u> in isLoadOrMultipleLoads()
20563 auto *Ld0 = dyn_cast<LoadSDNode>(SV2->getOperand(0).getOperand(0)); in isLoadOrMultipleLoads()
20564 auto *Ld1 = dyn_cast<LoadSDNode>(SV2->getOperand(0).getOperand(1)); in isLoadOrMultipleLoads()
20565 auto *Ld2 = dyn_cast<LoadSDNode>(SV2->getOperand(1).getOperand(0)); in isLoadOrMultipleLoads()
20566 auto *Ld3 = dyn_cast<LoadSDNode>(B.getOperand(1).getOperand(0)); in isLoadOrMultipleLoads()
20593 unsigned Size = get<0>(L)->getValueType(0).getSizeInBits(); in areLoadedOffsetButOtherwiseSame()
20594 return Size == get<1>(L)->getValueType(0).getSizeInBits() && in areLoadedOffsetButOtherwiseSame()
20595 DAG.areNonVolatileConsecutiveLoads(get<1>(L), get<0>(L), in areLoadedOffsetButOtherwiseSame()
20606 return areLoadedOffsetButOtherwiseSame(Op0.getOperand(0), Op1.getOperand(0), in areLoadedOffsetButOtherwiseSame()
20613 EVT XVT = Op0.getOperand(0).getValueType(); in areLoadedOffsetButOtherwiseSame()
20617 return areLoadedOffsetButOtherwiseSame(Op0.getOperand(0), Op1.getOperand(0), in areLoadedOffsetButOtherwiseSame()
20635 EVT VT = N->getValueType(0); in performExtBinopLoadFold()
20641 SDValue Other = N->getOperand(0); in performExtBinopLoadFold()
20650 if (!ISD::isExtOpcode(Shift.getOperand(0).getOpcode()) || in performExtBinopLoadFold()
20652 Shift.getOperand(0).getOperand(0).getValueType() != in performExtBinopLoadFold()
20653 Other.getOperand(0).getValueType() || in performExtBinopLoadFold()
20654 !Other.hasOneUse() || !Shift.getOperand(0).hasOneUse()) in performExtBinopLoadFold()
20657 SDValue Op0 = Other.getOperand(0); in performExtBinopLoadFold()
20658 SDValue Op1 = Shift.getOperand(0).getOperand(0); in performExtBinopLoadFold()
20660 unsigned NumSubLoads = 0; in performExtBinopLoadFold()
20671 (Other.getOpcode() != Shift.getOperand(0).getOpcode() && in performExtBinopLoadFold()
20709 SmallVector<int> LowMask(NumElts, 0), HighMask(NumElts, 0); in performExtBinopLoadFold()
20710 int Hi = NumSubElts, Lo = 0; in performExtBinopLoadFold()
20711 for (unsigned i = 0; i < NumSubLoads; i++) { in performExtBinopLoadFold()
20712 for (unsigned j = 0; j < NumSubElts; j++) { in performExtBinopLoadFold()
20724 if (Other.getOpcode() != Shift.getOperand(0).getOpcode()) { in performExtBinopLoadFold()
20726 NewOp, DAG.getConstant(0, DL, MVT::i64)); in performExtBinopLoadFold()
20735 Ext1 = DAG.getNode(Shift.getOperand(0).getOpcode(), DL, VT, Extr1); in performExtBinopLoadFold()
20740 DAG.getConstant(0, DL, MVT::i64)); in performExtBinopLoadFold()
20793 SDValue LHS = N->getOperand((IID == Intrinsic::not_intrinsic) ? 0 : 1); in tryCombineLongOpWithDup()
20814 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), LHS, RHS); in tryCombineLongOpWithDup()
20816 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0), in tryCombineLongOpWithDup()
20817 N->getOperand(0), LHS, RHS); in tryCombineLongOpWithDup()
20821 MVT ElemTy = N->getSimpleValueType(0).getScalarType(); in tryCombineShiftImm()
20841 if (ShiftAmount == 0 && IID != Intrinsic::aarch64_neon_sqshlu) in tryCombineShiftImm()
20874 if (ShiftAmount < 0) { in tryCombineShiftImm()
20884 EVT VT = N->getValueType(0); in tryCombineShiftImm()
20895 if (N->getValueType(0) == MVT::i64) in tryCombineShiftImm()
20897 DAG.getConstant(0, dl, MVT::i64)); in tryCombineShiftImm()
20899 } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) { in tryCombineShiftImm()
20902 if (N->getValueType(0) == MVT::i64) in tryCombineShiftImm()
20904 DAG.getConstant(0, dl, MVT::i64)); in tryCombineShiftImm()
20924 N->getOperand(0), N->getOperand(1), AndN.getOperand(0)); in tryCombineCRC32()
20930 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), in combineAcrossLanesIntrinsic()
20934 DAG.getConstant(0, dl, MVT::i64)); in combineAcrossLanesIntrinsic()
20946 SDValue StepVector = DAG.getStepVector(DL, N->getValueType(0)); in LowerSVEIntrinsicIndex()
20947 SDValue Step = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op2); in LowerSVEIntrinsicIndex()
20948 SDValue Mul = DAG.getNode(ISD::MUL, DL, N->getValueType(0), StepVector, Step); in LowerSVEIntrinsicIndex()
20949 SDValue Base = DAG.getNode(ISD::SPLAT_VECTOR, DL, N->getValueType(0), Op1); in LowerSVEIntrinsicIndex()
20950 return DAG.getNode(ISD::ADD, DL, N->getValueType(0), Mul, Base); in LowerSVEIntrinsicIndex()
20963 return DAG.getNode(AArch64ISD::DUP_MERGE_PASSTHRU, dl, N->getValueType(0), in LowerSVEIntrinsicDUP()
20970 EVT VT = N->getValueType(0); in LowerSVEIntrinsicEXT()
21003 EVT VT = N->getValueType(0); in tryConvertSVEWideCompare()
21021 if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) { in tryConvertSVEWideCompare()
21035 if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) { in tryConvertSVEWideCompare()
21071 SDValue FVal = DAG.getConstant(0, DL, OutVT); in getPTest()
21104 EVT ReduceVT = getPackedSVEVectorVT(N->getValueType(0)); in combineSVEReductionInt()
21109 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in combineSVEReductionInt()
21110 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce, in combineSVEReductionInt()
21126 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in combineSVEReductionFP()
21127 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce, in combineSVEReductionFP()
21142 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in combineSVEReductionOrderedFP()
21150 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), Reduce, in combineSVEReductionOrderedFP()
21169 return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Op1, Op2); in convertMergedOpToPredOp()
21171 return DAG.getNode(Opc, SDLoc(N), N->getValueType(0), Pg, Op1, Op2); in convertMergedOpToPredOp()
21187 if (!N->hasNUsesOfValue(2, 0)) in tryCombineWhileLo()
21190 const uint64_t HalfSize = N->getValueType(0).getVectorMinNumElements() / 2; in tryCombineWhileLo()
21210 if (OffLo != 0 || OffHi != HalfSize) in tryCombineWhileLo()
21213 EVT HalfVec = Lo->getValueType(0); in tryCombineWhileLo()
21214 if (HalfVec != Hi->getValueType(0) || in tryCombineWhileLo()
21230 {Lo->getValueType(0), Hi->getValueType(0)}, {ID, Idx, TC}); in tryCombineWhileLo()
21232 DCI.CombineTo(Lo, R.getValue(0)); in tryCombineWhileLo()
21235 return SDValue(N, 0); in tryCombineWhileLo()
21262 return DAG.getNode(ISD::FMAXIMUM, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21265 return DAG.getNode(ISD::FMINIMUM, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21268 return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21271 return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21274 return DAG.getNode(AArch64ISD::SMULL, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21277 return DAG.getNode(AArch64ISD::UMULL, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21280 return DAG.getNode(AArch64ISD::PMULL, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21293 return DAG.getNode(ISD::ABDS, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21296 return DAG.getNode(ISD::ABDU, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21300 return tryCombineCRC32(0xff, N, DAG); in performIntrinsicCombine()
21303 return tryCombineCRC32(0xffff, N, DAG); in performIntrinsicCombine()
21306 if (N->getOperand(2)->getValueType(0).getVectorElementType() == MVT::i64) in performIntrinsicCombine()
21331 return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21336 return DAG.getNode(AArch64ISD::MUL_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21339 return DAG.getNode(AArch64ISD::MULHS_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21342 return DAG.getNode(AArch64ISD::MULHU_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21345 return DAG.getNode(AArch64ISD::SMIN_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21348 return DAG.getNode(AArch64ISD::UMIN_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21351 return DAG.getNode(AArch64ISD::SMAX_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21354 return DAG.getNode(AArch64ISD::UMAX_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21357 return DAG.getNode(AArch64ISD::SHL_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21360 return DAG.getNode(AArch64ISD::SRL_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21363 return DAG.getNode(AArch64ISD::SRA_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21366 return DAG.getNode(AArch64ISD::FADD_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21369 return DAG.getNode(AArch64ISD::FDIV_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21372 return DAG.getNode(AArch64ISD::FMAX_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21375 return DAG.getNode(AArch64ISD::FMAXNM_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21378 return DAG.getNode(AArch64ISD::FMA_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21382 return DAG.getNode(AArch64ISD::FMIN_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21385 return DAG.getNode(AArch64ISD::FMINNM_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21388 return DAG.getNode(AArch64ISD::FMUL_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21391 return DAG.getNode(AArch64ISD::FSUB_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21394 return DAG.getNode(ISD::ADD, SDLoc(N), N->getValueType(0), N->getOperand(2), in performIntrinsicCombine()
21397 return DAG.getNode(ISD::SUB, SDLoc(N), N->getValueType(0), N->getOperand(2), in performIntrinsicCombine()
21402 return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0), N->getOperand(2), in performIntrinsicCombine()
21405 return DAG.getNode(AArch64ISD::BIC, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21408 return DAG.getNode(ISD::XOR, SDLoc(N), N->getValueType(0), N->getOperand(2), in performIntrinsicCombine()
21411 return DAG.getNode(ISD::OR, SDLoc(N), N->getValueType(0), N->getOperand(2), in performIntrinsicCombine()
21414 return DAG.getNode(ISD::ABDS, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21417 return DAG.getNode(ISD::ABDU, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21420 return DAG.getNode(AArch64ISD::SDIV_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21423 return DAG.getNode(AArch64ISD::UDIV_PRED, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21428 return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21433 return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21436 return DAG.getNode(ISD::SADDSAT, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21439 return DAG.getNode(ISD::SSUBSAT, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21442 return DAG.getNode(ISD::UADDSAT, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21445 return DAG.getNode(ISD::USUBSAT, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21448 return DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21453 N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21459 N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21465 N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21471 N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21477 N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21483 N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21488 N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21504 return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0), in performIntrinsicCombine()
21527 return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21530 return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21533 return getPTest(DAG, N->getValueType(0), N->getOperand(1), N->getOperand(2), in performIntrinsicCombine()
21557 N->getOperand(0)->getOpcode() == ISD::SETCC); in performSignExtendSetCCCombine()
21558 const SDValue SetCC = N->getOperand(0); in performSignExtendSetCCCombine()
21560 const SDValue CCOp0 = SetCC.getOperand(0); in performSignExtendSetCCCombine()
21562 if (!CCOp0->getValueType(0).isInteger() || in performSignExtendSetCCCombine()
21563 !CCOp1->getValueType(0).isInteger()) in performSignExtendSetCCCombine()
21572 if (isCheapToExtend(SetCC.getOperand(0)) && in performSignExtendSetCCCombine()
21575 DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp0); in performSignExtendSetCCCombine()
21577 DAG.getNode(ExtType, SDLoc(N), N->getValueType(0), CCOp1); in performSignExtendSetCCCombine()
21580 SDLoc(SetCC), N->getValueType(0), Ext1, Ext2, in performSignExtendSetCCCombine()
21595 (N->getOperand(0).getOpcode() == ISD::ABDU || in performExtendCombine()
21596 N->getOperand(0).getOpcode() == ISD::ABDS)) { in performExtendCombine()
21597 SDNode *ABDNode = N->getOperand(0).getNode(); in performExtendCombine()
21603 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), NewABD); in performExtendCombine()
21606 if (N->getValueType(0).isFixedLengthVector() && in performExtendCombine()
21608 N->getOperand(0)->getOpcode() == ISD::SETCC) in performExtendCombine()
21626 uint64_t BaseOffset = 0; in splitStoreSplat()
21637 BasePtr = BasePtr->getOperand(0); in splitStoreSplat()
21646 NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr, in splitStoreSplat()
21686 EVT VT = N->getValueType(0); in performLD1Combine()
21696 SDValue Ops[] = { N->getOperand(0), // Chain in performLD1Combine()
21705 Load = DAG.getNode(ISD::TRUNCATE, DL, VT, Load.getValue(0)); in performLD1Combine()
21712 EVT VT = N->getValueType(0); in performLDNT1Combine()
21720 SDValue PassThru = DAG.getConstant(0, DL, LoadVT); in performLDNT1Combine()
21741 EVT VT = N->getValueType(0); in performLD1ReplicateCombine()
21747 SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)}; in performLD1ReplicateCombine()
21752 Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0)); in performLD1ReplicateCombine()
21773 SDValue Ops[] = { N->getOperand(0), // Chain in performST1Combine()
21780 return DAG.getNode(AArch64ISD::ST1_PRED, DL, N->getValueType(0), Ops); in performST1Combine()
21812 /// movi v0.2d, #0
21854 for (int I = 0; I < NumVecElts; ++I) { in replaceZeroVectorStore()
21903 // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32. in replaceSplatVectorStore()
21906 for (unsigned I = 0; I < NumVecElts; ++I) { in replaceSplatVectorStore()
21912 if (I == 0) in replaceSplatVectorStore()
21926 StVal = StVal.getOperand(0); in replaceSplatVectorStore()
21992 DAG.getConstant(0, DL, MVT::i64)); in splitStores()
22001 return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr, in splitStores()
22023 if (N->getOperand(0).isUndef()) in performUnpackCombine()
22024 return DAG.getUNDEF(N->getValueType(0)); in performUnpackCombine()
22029 if (N->getOperand(0).getOpcode() == ISD::MLOAD && in performUnpackCombine()
22031 MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N->getOperand(0)); in performUnpackCombine()
22036 SDValue(MLD, 0).hasOneUse() && Mask->getOpcode() == AArch64ISD::PTRUE && in performUnpackCombine()
22040 unsigned PgPattern = Mask->getConstantOperandVal(0); in performUnpackCombine()
22041 EVT VT = N->getValueType(0); in performUnpackCombine()
22049 SDValue PassThru = DAG.getConstant(0, DL, VT); in performUnpackCombine()
22068 SDValue Op0 = N->getOperand(0); in isHalvingTruncateAndConcatOfLegalIntScalableType()
22069 EVT SrcVT = Op0->getValueType(0); in isHalvingTruncateAndConcatOfLegalIntScalableType()
22070 EVT DstVT = N->getValueType(0); in isHalvingTruncateAndConcatOfLegalIntScalableType()
22081 SDValue Op0 = N->getOperand(0); in tryCombineExtendRShTrunc()
22083 EVT ResVT = N->getValueType(0); in tryCombineExtendRShTrunc()
22095 SDValue Lo = Op0.getOperand(0); in tryCombineExtendRShTrunc()
22096 SDValue Hi = Op1.getOperand(0); in tryCombineExtendRShTrunc()
22100 SDValue OrigArg = Lo.getOperand(0); in tryCombineExtendRShTrunc()
22101 if (OrigArg != Hi.getOperand(0)) in tryCombineExtendRShTrunc()
22121 EVT VT = Srl->getValueType(0); in trySimplifySrlAddToRshrnb()
22149 SDValue Op0 = N->getOperand(0); in performUzpCombine()
22151 EVT ResVT = N->getValueType(0); in performUzpCombine()
22156 Op0.getOperand(0) == Op1.getOperand(0)) { in performUzpCombine()
22158 SDValue SourceVec = Op0.getOperand(0); in performUzpCombine()
22162 if (ExtIdx0 == 0 && ExtIdx1 == NumElements / 2) { in performUzpCombine()
22168 DAG.getConstant(0, DL, OpVT)); in performUzpCombine()
22214 if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) { in performUzpCombine()
22215 SDValue X = Op0.getOperand(0).getOperand(0); in performUzpCombine()
22222 if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) { in performUzpCombine()
22223 SDValue Z = Op1.getOperand(0).getOperand(1); in performUzpCombine()
22239 if (Op0.getOperand(0).getValueType() == Op1.getOperand(0).getValueType()) { in performUzpCombine()
22240 return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0.getOperand(0), in performUzpCombine()
22241 Op1.getOperand(0)); in performUzpCombine()
22268 SourceOp0 = SourceOp0.getOperand(0); in performUzpCombine()
22269 SourceOp1 = SourceOp1.getOperand(0); in performUzpCombine()
22335 SDValue Chain = N->getOperand(0); in performGLD1Combine()
22341 EVT ResVT = N->getValueType(0); in performGLD1Combine()
22351 SDValue ExtPg = Offset.getOperand(0); in performGLD1Combine()
22381 SDValue Op = N->getOperand(0); in performVectorShiftCombine()
22391 if (DCI.DAG.ComputeNumSignBits(Op.getOperand(0)) > ShiftImm) in performVectorShiftCombine()
22392 return Op.getOperand(0); in performVectorShiftCombine()
22402 return SDValue(N, 0); in performVectorShiftCombine()
22411 if (N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND && in performSunpkloCombine()
22412 N->getOperand(0)->getOperand(0)->getValueType(0).getScalarType() == in performSunpkloCombine()
22414 SDValue CC = N->getOperand(0)->getOperand(0); in performSunpkloCombine()
22415 auto VT = CC->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext()); in performSunpkloCombine()
22417 DAG.getVectorIdxConstant(0, SDLoc(N))); in performSunpkloCombine()
22418 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), N->getValueType(0), Unpk); in performSunpkloCombine()
22433 EVT VT = N->getValueType(0); in performPostLD1Combine()
22438 unsigned LoadIdx = IsLaneOp ? 1 : 0; in performPostLD1Combine()
22479 SDValue Vector = N->getOperand(0); in performPostLD1Combine()
22489 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); in performPostLD1Combine()
22511 Ops.push_back(LD->getOperand(0)); // Chain in performPostLD1Combine()
22528 SDValue(LD, 0), // The result of load in performPostLD1Combine()
22532 DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result in performPostLD1Combine()
22569 SDValue Orig = Ext->getOperand(0); in foldTruncStoreOfExt()
22627 DAG.getConstant(0, DL, MVT::i64)); in combineV3I8LoadExt()
22646 return SDValue(N, 0); in performLOADCombine()
22652 return SDValue(N, 0); in performLOADCombine()
22656 MemVT.getSizeInBits() % 256 == 0 || in performLOADCombine()
22657 256 % MemVT.getScalarSizeInBits() != 0) in performLOADCombine()
22658 return SDValue(N, 0); in performLOADCombine()
22673 // Create all 256-bit loads starting from offset 0 up to (Num256Loads - 1) * 32. in performLOADCombine()
22674 for (unsigned I = 0; I < Num256Loads; I++) { in performLOADCombine()
22703 SDValue InsertIdx = DAG.getVectorIdxConstant(0, DL); in performLOADCombine()
22717 {ConcatVectors, DAG.getVectorIdxConstant(0, DL)}); in performLOADCombine()
22723 static EVT tryGetOriginalBoolVectorType(SDValue Op, int Depth = 0) { in tryGetOriginalBoolVectorType()
22733 return Op.getOperand(0).getValueType(); in tryGetOriginalBoolVectorType()
22754 // element's bits are either all 1 or all 0.
22757 SDValue ComparisonResult(N, 0); in vectorToScalarBitmask()
22786 // Ensure that all elements' bits are either 0s or 1s. in vectorToScalarBitmask()
22795 for (unsigned Half = 0; Half < 2; ++Half) { in vectorToScalarBitmask()
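
vectorToScalarBitmask relies on each comparison lane being all-0s or all-1s, so the scalar mask can keep just one bit per lane. A minimal scalar model of that step; lanesToBitmask is a hypothetical stand-in, not the real helper:

    #include <cassert>
    #include <cstdint>

    // One bit per lane: bit 0 of an all-0s lane is 0, of an all-1s lane is 1.
    uint16_t lanesToBitmask(const uint8_t lanes[16]) {
      uint16_t mask = 0;
      for (int i = 0; i < 16; ++i) {
        assert(lanes[i] == 0x00 || lanes[i] == 0xFF); // lanes must be canonical
        mask |= (uint16_t)(lanes[i] & 1u) << i;
      }
      return mask;
    }

    int main() {
      uint8_t lanes[16] = {0xFF, 0, 0, 0xFF}; // remaining lanes are zero
      assert(lanesToBitmask(lanes) == 0b1001);
      return 0;
    }
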
22879 Value->getOperand(0).getValueType().getVectorElementType(), 4); in combineI8TruncStore()
22883 {UndefVector, Value->getOperand(0), DAG.getVectorIdxConstant(0, DL)}); in combineI8TruncStore()
22905 DAG.getConstant(0, DL, MVT::i64)); in combineI8TruncStore()
22907 MF.getMachineMemOperand(MMO, 0, 1)); in combineI8TruncStore()
22938 hasValidElementTypeForFPTruncStore(Value.getOperand(0).getValueType())) in performSTORECombine()
22939 return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), Ptr, in performSTORECombine()
22947 return SDValue(N, 0); in performSTORECombine()
22984 Value = Value.getOperand(0); in performMSTORECombine()
22988 EVT InVT = Value.getOperand(0).getValueType(); in performMSTORECombine()
22992 unsigned PgPattern = Mask->getConstantOperandVal(0); in performMSTORECombine()
23000 return DAG.getMaskedStore(MST->getChain(), DL, Value.getOperand(0), in performMSTORECombine()
23011 EVT ValueVT = Value->getValueType(0); in performMSTORECombine()
23044 Index = Index.getOperand(0); in foldIndexIntoBase()
23056 Index.getOperand(0).getOpcode() == ISD::ADD) { in foldIndexIntoBase()
23057 SDValue Add = Index.getOperand(0); in foldIndexIntoBase()
23066 Add.getOperand(0), ShiftOp); in foldIndexIntoBase()
23106 int64_t Stride = 0; in findMoreOptimalIndexType()
23108 Stride = cast<ConstantSDNode>(Index.getOperand(0))->getSExtValue(); in findMoreOptimalIndexType()
23113 Index.getOperand(0).getOpcode() == ISD::STEP_VECTOR) { in findMoreOptimalIndexType()
23117 int64_t Step = (int64_t)Index.getOperand(0).getConstantOperandVal(1); in findMoreOptimalIndexType()
23123 if (Stride == 0) in findMoreOptimalIndexType()
23172 DAG.getVTList(N->getValueType(0), MVT::Other), MGT->getMemoryVT(), DL, in performMaskedGatherScatterCombine()
23217 unsigned NewOpc = 0; in performNEONPostLDSTCombine()
23218 unsigned NumVecs = 0; in performNEONPostLDSTCombine()
23270 VecTy = N->getValueType(0); in performNEONPostLDSTCombine()
23273 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); in performNEONPostLDSTCombine()
23284 Ops.push_back(N->getOperand(0)); // Incoming chain in performNEONPostLDSTCombine()
23294 unsigned NumResultVecs = (IsStore ? 0 : NumVecs); in performNEONPostLDSTCombine()
23296 for (n = 0; n < NumResultVecs; ++n) in performNEONPostLDSTCombine()
23309 for (unsigned i = 0; i < NumResultVecs; ++i) { in performNEONPostLDSTCombine()
23376 // | ADD | |0xff| | |
23402 // patterns present in both extensions (0,7). For every distinct set of
23408 // and w10, w8, #0x0f
23431 // symbolic values and well known constants (0, 1, -1, MaxUInt) we can in isEquivalentMaskless()
23446 if ((AddConstant == 0) || in isEquivalentMaskless()
23447 (CompConstant == MaxUInt - 1 && AddConstant < 0) || in isEquivalentMaskless()
23448 (AddConstant >= 0 && CompConstant < 0) || in isEquivalentMaskless()
23449 (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant)) in isEquivalentMaskless()
23454 if ((AddConstant == 0) || in isEquivalentMaskless()
23455 (AddConstant >= 0 && CompConstant <= 0) || in isEquivalentMaskless()
23456 (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant)) in isEquivalentMaskless()
23461 if ((AddConstant >= 0 && CompConstant < 0) || in isEquivalentMaskless()
23462 (AddConstant <= 0 && CompConstant >= -1 && in isEquivalentMaskless()
23468 if ((AddConstant == 0) || in isEquivalentMaskless()
23469 (AddConstant > 0 && CompConstant <= 0) || in isEquivalentMaskless()
23470 (AddConstant < 0 && CompConstant <= AddConstant)) in isEquivalentMaskless()
23475 if ((AddConstant >= 0 && CompConstant <= 0) || in isEquivalentMaskless()
23476 (AddConstant <= 0 && CompConstant >= 0 && in isEquivalentMaskless()
23482 if ((AddConstant > 0 && CompConstant < 0) || in isEquivalentMaskless()
23483 (AddConstant < 0 && CompConstant >= 0 && in isEquivalentMaskless()
23485 (AddConstant >= 0 && CompConstant >= 0 && in isEquivalentMaskless()
23487 (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant)) in isEquivalentMaskless()
23502 // (X & C) >u Mask --> ((X & (C & ~Mask)) != 0) in performSubsToAndsCombine()
23503 // (X & C) <u Pow2 --> ((X & (C & ~(Pow2-1))) == 0) in performSubsToAndsCombine()
23531 AArch64ISD::ANDS, DL, SubsNode->getVTList(), AndNode->getOperand(0), in performSubsToAndsCombine()
23532 DAG.getConstant(AndSMask, DL, SubsC->getValueType(0))); in performSubsToAndsCombine()
23535 N->getOperand(CCIndex)->getValueType(0)); in performSubsToAndsCombine()
23545 SDValue Ops[] = {N->getOperand(0), N->getOperand(1), AArch64_CC, in performSubsToAndsCombine()
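
Both rewrites in performSubsToAndsCombine hold whenever Mask is a low-bit mask (2^k - 1) and Pow2 is a power of two; a brute-force check over 8-bit values, as a standalone sketch:

    #include <cassert>

    int main() {
      for (unsigned X = 0; X < 256; ++X)
        for (unsigned C = 0; C < 256; ++C)
          for (unsigned k = 0; k < 8; ++k) {
            unsigned Mask = (1u << k) - 1, Pow2 = 1u << k;
            // (X & C) >u Mask  <=>  (X & (C & ~Mask)) != 0
            assert(((X & C) > Mask) == ((X & (C & ~Mask)) != 0));
            // (X & C) <u Pow2  <=>  (X & (C & ~(Pow2 - 1))) == 0
            assert(((X & C) < Pow2) == ((X & (C & ~(Pow2 - 1))) == 0));
          }
      return 0;
    }
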
23559 if (CondOpcode != AArch64ISD::SUBS || SubsNode->hasAnyUseOfValue(0) || in performCONDCombine()
23566 SDNode *AndNode = SubsNode->getOperand(0).getNode(); in performCONDCombine()
23567 unsigned MaskBits = 0; in performCONDCombine()
23587 SDValue AddValue = AndNode->getOperand(0); in performCONDCombine()
23594 SDValue AddInputValue1 = AddValue.getNode()->getOperand(0); in performCONDCombine()
23619 SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0), in performCONDCombine()
23626 return SDValue(N, 0); in performCONDCombine()
23642 SDValue Chain = N->getOperand(0); in performBRCONDCombine()
23658 if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1)) in performBRCONDCombine()
23661 SDValue LHS = Cmp.getOperand(0); in performBRCONDCombine()
23698 Zero = N->getOperand(0); in foldCSELofCTTZ()
23702 CTTZ = N->getOperand(0); in foldCSELofCTTZ()
23708 CTTZ.getOperand(0).getOpcode() != ISD::CTTZ)) in foldCSELofCTTZ()
23718 ? CTTZ.getOperand(0).getOperand(0) in foldCSELofCTTZ()
23719 : CTTZ.getOperand(0); in foldCSELofCTTZ()
23721 if (X != SUBS.getOperand(0)) in foldCSELofCTTZ()
23725 ? CTTZ.getOperand(0).getValueSizeInBits() in foldCSELofCTTZ()
23741 SDValue L = Op->getOperand(0); in foldCSELOfCSEL()
23750 SDValue CmpLHS = OpCmp.getOperand(0); in foldCSELOfCSEL()
23758 SDValue X = CmpLHS->getOperand(0); in foldCSELOfCSEL()
23787 EVT VT = Op->getValueType(0); in foldCSELOfCSEL()
23798 if (N->getOperand(0) == N->getOperand(1)) in performCSELCombine()
23799 return N->getOperand(0); in performCSELCombine()
23804 // CSEL 0, cttz(X), eq(X, 0) -> AND (cttz X), (bitwidth - 1) in performCSELCombine()
23805 // CSEL cttz(X), 0, ne(X, 0) -> AND (cttz X), (bitwidth - 1) in performCSELCombine()
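
The fold is sound because CTTZ of zero yields the bitwidth, and masking with bitwidth-1 collapses that case to 0, matching what the CSEL selects; a standalone C++20 check:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x : {0u, 1u, 2u, 40u, 0x80000000u, 0xFFFFFFFFu}) {
        // CSEL 0, cttz(X), eq(X, 0)
        uint32_t csel = (x == 0) ? 0u : (uint32_t)std::countr_zero(x);
        // AND (cttz X), (bitwidth - 1); countr_zero(0u) == 32 and 32 & 31 == 0
        uint32_t masked = (uint32_t)std::countr_zero(x) & 31u;
        assert(csel == masked);
      }
      return 0;
    }
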
23816 EVT Op0MVT = Op->getOperand(0).getValueType(); in tryToWidenSetCCOperands()
23825 EVT UseMVT = FirstUse->getValueType(0); in tryToWidenSetCCOperands()
23829 return N->getOpcode() != ISD::VSELECT || N->getValueType(0) != UseMVT; in tryToWidenSetCCOperands()
23844 Op->getOperand(0)); in tryToWidenSetCCOperands()
23846 Op->getOperand(0)); in tryToWidenSetCCOperands()
23848 Op0ExtV = SDValue(Op0SExt, 0); in tryToWidenSetCCOperands()
23851 Op0ExtV = SDValue(Op0ZExt, 0); in tryToWidenSetCCOperands()
23863 SDValue Vec = N->getOperand(0); in performVecReduceBitwiseCombine()
23869 return getVectorBitwiseReduce(N->getOpcode(), Vec, N->getValueType(0), DL, in performVecReduceBitwiseCombine()
23880 SDValue LHS = N->getOperand(0); in performSETCCCombine()
23884 EVT VT = N->getValueType(0); in performSETCCCombine()
23889 // setcc (csel 0, 1, cond, X), 1, ne ==> csel 0, 1, !cond, X in performSETCCCombine()
23892 isNullConstant(LHS->getOperand(0)) && isOneConstant(LHS->getOperand(1)) && in performSETCCCombine()
23899 // csel 0, 1, !cond, X in performSETCCCombine()
23901 DAG.getNode(AArch64ISD::CSEL, DL, LHS.getValueType(), LHS.getOperand(0), in performSETCCCombine()
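
A truth-table check of the CSEL/SETCC fold above, as a standalone sketch: the outer setcc against 1 with ne simply inverts the csel condition.

    #include <cassert>

    int main() {
      for (bool cond : {false, true}) {
        int cselOld = cond ? 0 : 1;         // csel 0, 1, cond, X
        int setcc = (cselOld != 1) ? 1 : 0; // setcc ..., 1, ne
        int cselNew = !cond ? 0 : 1;        // csel 0, 1, !cond, X
        assert(setcc == cselNew);
      }
      return 0;
    }
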
23907 // setcc (srl x, imm), 0, ne ==> setcc (and x, (-1 << imm)), 0, ne in performSETCCCombine()
23912 EVT TstVT = LHS->getValueType(0); in performSETCCCombine()
23916 SDValue TST = DAG.getNode(ISD::AND, DL, TstVT, LHS->getOperand(0), in performSETCCCombine()
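
Comparing a right shift against zero only tests the bits at or above the shift amount, which is why an AND with -1 << imm (a single TST on AArch64) is equivalent; a brute-force check as a standalone sketch:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x = 0; x < (1u << 16); ++x)
        for (unsigned imm = 1; imm < 16; ++imm)
          assert(((x >> imm) != 0) == ((x & (~0u << imm)) != 0));
      return 0;
    }
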
23922 // setcc (iN (bitcast (vNi1 X))), 0, (eq|ne) in performSETCCCombine()
23923 // ==> setcc (iN (zext (i1 (vecreduce_or (vNi1 X))))), 0, (eq|ne) in performSETCCCombine()
23930 EVT ToVT = LHS->getValueType(0); in performSETCCCombine()
23931 EVT FromVT = LHS->getOperand(0).getValueType(); in performSETCCCombine()
23936 DL, MVT::i1, LHS->getOperand(0)); in performSETCCCombine()
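
An iN bitcast of an i1 vector is nonzero exactly when some lane is set, i.e. when the OR-reduction is true; a scalar model packing 8 boolean lanes into a byte:

    #include <cassert>

    int main() {
      for (unsigned bits = 0; bits < 256; ++bits) { // 8 i1 lanes packed in i8
        bool reduceOr = false;
        for (unsigned lane = 0; lane < 8; ++lane)
          reduceOr = reduceOr || ((bits >> lane) & 1u);
        assert((bits != 0) == reduceOr); // eq/ne against 0 can use vecreduce_or
      }
      return 0;
    }
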
23943 // Try to perform the memcmp when the result is tested for [in]equality with 0 in performSETCCCombine()
23956 SDValue LHS = N->getOperand(0); in performFlagSettingCombine()
23958 EVT VT = N->getValueType(0); in performFlagSettingCombine()
23963 return DCI.DAG.getMergeValues({Res, DCI.DAG.getConstant(0, DL, MVT::i32)}, in performFlagSettingCombine()
23970 DCI.CombineTo(Generic, SDValue(N, 0)); in performFlagSettingCombine()
23977 // (sign_extend (extract_subvector (setcc_merge_zero ... pred ...))), 0, ne in performSetCCPunpkCombine()
23979 SDValue Pred = N->getOperand(0); in performSetCCPunpkCombine()
23988 SDValue Extract = LHS->getOperand(0); in performSetCCPunpkCombine()
23990 Extract->getValueType(0) != N->getValueType(0) || in performSetCCPunpkCombine()
23991 Extract->getConstantOperandVal(1) != 0) in performSetCCPunpkCombine()
23994 SDValue InnerSetCC = Extract->getOperand(0); in performSetCCPunpkCombine()
24002 SDValue InnerPred = InnerSetCC.getOperand(0); in performSetCCPunpkCombine()
24005 Pred.getConstantOperandVal(0) == InnerPred.getConstantOperandVal(0) && in performSetCCPunpkCombine()
24006 Pred->getConstantOperandVal(0) >= AArch64SVEPredPattern::vl1 && in performSetCCPunpkCombine()
24007 Pred->getConstantOperandVal(0) <= AArch64SVEPredPattern::vl256) in performSetCCPunpkCombine()
24019 SDValue Pred = N->getOperand(0); in performSetccMergeZeroCombine()
24029 LHS->getOperand(0)->getValueType(0) == N->getValueType(0)) { in performSetccMergeZeroCombine()
24031 // pred, extend(setcc_merge_zero(pred, ...)), != splat(0)) in performSetccMergeZeroCombine()
24033 if (LHS->getOperand(0)->getOpcode() == AArch64ISD::SETCC_MERGE_ZERO && in performSetccMergeZeroCombine()
24034 LHS->getOperand(0)->getOperand(0) == Pred) in performSetccMergeZeroCombine()
24035 return LHS->getOperand(0); in performSetccMergeZeroCombine()
24038 // all_active, extend(nxvNi1 ...), != splat(0)) in performSetccMergeZeroCombine()
24041 return LHS->getOperand(0); in performSetccMergeZeroCombine()
24044 // pred, extend(nxvNi1 ...), != splat(0)) in performSetccMergeZeroCombine()
24049 return DAG.getNode(ISD::AND, SDLoc(N), N->getValueType(0), in performSetccMergeZeroCombine()
24050 LHS->getOperand(0), Pred); in performSetccMergeZeroCombine()
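
Per lane, the merge-zero semantics above reduce to a plain AND, which is the node performSetccMergeZeroCombine emits; a truth-table check as a standalone sketch:

    #include <cassert>

    int main() {
      for (bool pred : {false, true})
        for (bool x : {false, true}) {
          int extended = x ? -1 : 0;             // extend(nxvNi1 lane)
          bool merged = pred && (extended != 0); // setcc_merge_zero, != splat(0)
          assert(merged == (x && pred));         // the AND the combine emits
        }
      return 0;
    }
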
24067 // already been taken care of (e.g. and of 0, test of undefined shifted bits, in getTestBitOperand()
24073 Bit < Op->getValueType(0).getSizeInBits()) { in getTestBitOperand()
24074 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); in getTestBitOperand()
24079 Bit < Op->getOperand(0).getValueSizeInBits()) { in getTestBitOperand()
24080 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); in getTestBitOperand()
24097 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); in getTestBitOperand()
24103 (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) { in getTestBitOperand()
24105 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); in getTestBitOperand()
24112 if (Bit >= Op->getValueType(0).getSizeInBits()) in getTestBitOperand()
24113 Bit = Op->getValueType(0).getSizeInBits() - 1; in getTestBitOperand()
24114 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); in getTestBitOperand()
24118 if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) { in getTestBitOperand()
24120 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); in getTestBitOperand()
24128 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); in getTestBitOperand()
24155 return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc, in performTBZCombine()
24167 auto NTy = N->getValueType(0); in trySwapVSelectOperands()
24171 SDValue SetCC = N->getOperand(0); in trySwapVSelectOperands()
24183 if (SelectA != SelectB.getOperand(0)) in trySwapVSelectOperands()
24188 ISD::getSetCCInverse(CC, SetCC.getOperand(0).getValueType()); in trySwapVSelectOperands()
24190 DAG.getSetCC(SDLoc(SetCC), SetCC.getValueType(), SetCC.getOperand(0), in trySwapVSelectOperands()
24206 SDValue N0 = N->getOperand(0); in performVSelectCombine()
24218 SDValue SetCC = N->getOperand(0); in performVSelectCombine()
24221 SDValue CmpLHS = SetCC.getOperand(0); in performVSelectCombine()
24247 EVT CmpVT = N0.getOperand(0).getValueType(); in performVSelectCombine()
24254 EVT ResVT = N->getValueType(0); in performVSelectCombine()
24263 N0.getOperand(0), N0.getOperand(1), in performVSelectCombine()
24276 SDValue N0 = N->getOperand(0); in performSelectCombine()
24277 EVT ResVT = N->getValueType(0); in performSelectCombine()
24291 // If NumMaskElts == 0, the comparison is larger than the select result. in performSelectCombine()
24294 EVT SrcVT = N0.getOperand(0).getValueType(); in performSelectCombine()
24304 if (!ResVT.isVector() || NumMaskElts == 0) in performSelectCombine()
24320 // First perform a vector comparison, where lane 0 is the one we're interested in. in performSelectCombine()
24324 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0)); in performSelectCombine()
24330 SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0); in performSelectCombine()
24340 EVT VT = N->getValueType(0); in performDUPCombine()
24349 return DCI.DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SDValue(LN, 0), in performDUPCombine()
24350 DCI.DAG.getConstant(0, DL, MVT::i64)); in performDUPCombine()
24359 // t21: i32 = extract_vector_elt t19, Constant:i64<0> in performDUPCombine()
24362 // t22: v4i32 = AArch64ISD::DUPLANE32 t19, Constant:i64<0> in performDUPCombine()
24363 SDValue EXTRACT_VEC_ELT = N->getOperand(0); in performDUPCombine()
24365 if (VT == EXTRACT_VEC_ELT.getOperand(0).getValueType()) { in performDUPCombine()
24367 return DCI.DAG.getNode(Opcode, DL, VT, EXTRACT_VEC_ELT.getOperand(0), in performDUPCombine()
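
At the intrinsics level, the DUP combine above turns an extract-then-splat into a single lane splat; a hypothetical sketch assuming an AArch64 target with arm_neon.h (function names are mine; both functions compute the same vector, the second in the DUPLANE32 form):

    #include <arm_neon.h>

    // Extract lane 0 to a scalar, then dup it: the input DAG shape.
    int32x4_t splat_lane0_via_scalar(int32x4_t v) {
      return vdupq_n_s32(vgetq_lane_s32(v, 0));
    }

    // Splat directly from the lane: the DUPLANE32 form the combine produces.
    int32x4_t splat_lane0_via_lane(int32x4_t v) {
      return vdupq_laneq_s32(v, 0);
    }
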
24381 if (N->getValueType(0) == N->getOperand(0).getValueType()) in performNVCASTCombine()
24382 return N->getOperand(0); in performNVCASTCombine()
24383 if (N->getOperand(0).getOpcode() == AArch64ISD::NVCAST) in performNVCASTCombine()
24384 return DAG.getNode(AArch64ISD::NVCAST, SDLoc(N), N->getValueType(0), in performNVCASTCombine()
24385 N->getOperand(0).getOperand(0)); in performNVCASTCombine()
24405 auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0)); in performGlobalAddressCombine()
24446 SDValue BR = N->getOperand(0); in performCTLZCombine()
24452 return DAG.getNode(ISD::CTTZ, DL, BR.getValueType(), BR.getOperand(0)); in performCTLZCombine()
24474 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
24494 /// where <imm> = sizeof(<T>) * k, for k = 0, 1, ..., 31.
24506 const EVT SrcVT = Src->getValueType(0); in performScatterStoreCombine()
24557 // * in the range [0, 31 x #SizeInBytes], in performScatterStoreCombine()
24582 Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0); in performScatterStoreCombine()
24605 SDValue Ops[] = {N->getOperand(0), // Chain in performScatterStoreCombine()
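
Both the scatter combine above and the gather combine below accept an immediate offset only when it is sizeof(<T>) * k for k in [0, 31]; a small validity check as a standalone sketch (isValidImmOffset is a hypothetical helper):

    #include <cassert>
    #include <cstdint>

    bool isValidImmOffset(int64_t imm, unsigned elemSizeInBytes) {
      if (imm % elemSizeInBytes != 0) // must be a multiple of the element size
        return false;
      int64_t k = imm / elemSizeInBytes;
      return k >= 0 && k <= 31;       // multiplier restricted to [0, 31]
    }

    int main() {
      assert(isValidImmOffset(248, 8));  // 8 * 31: largest valid i64 offset
      assert(!isValidImmOffset(256, 8)); // 8 * 32: multiplier out of range
      assert(!isValidImmOffset(4, 8));   // not a multiple of the element size
      return 0;
    }
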
24618 const EVT RetVT = N->getValueType(0); in performGatherLoadCombine()
24660 // * in the range [0, 31 x #SizeInBytes], in performGatherLoadCombine()
24690 Offset = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::nxv2i64, Offset).getValue(0); in performGatherLoadCombine()
24703 SDValue Ops[] = {N->getOperand(0), // Chain in performGatherLoadCombine()
24711 Load = DAG.getNode(ISD::TRUNCATE, DL, RetVT, Load.getValue(0)); in performGatherLoadCombine()
24716 Load = DAG.getNode(ISD::BITCAST, DL, RetVT, Load.getValue(0)); in performGatherLoadCombine()
24725 SDValue Src = N->getOperand(0); in performSignExtendInRegCombine()
24742 SDValue ExtOp = Src->getOperand(0); in performSignExtendInRegCombine()
24755 return DAG.getNode(SOpc, DL, N->getValueType(0), Ext); in performSignExtendInRegCombine()
24836 EVT DstVT = N->getValueType(0); in performSignExtendInRegCombine()
24840 for (unsigned I = 0; I < Src->getNumOperands(); ++I) in performSignExtendInRegCombine()
24848 return SDValue(N, 0); in performSignExtendInRegCombine()
24923 SDValue InsertVec = N->getOperand(0); in removeRedundantInsertVectorElt()
24937 SDValue ExtractVec = InsertElt.getOperand(0); in removeRedundantInsertVectorElt()
24947 if (N->getValueType(0) != ExtractVec.getValueType()) in removeRedundantInsertVectorElt()
24968 SDValue N0 = N->getOperand(0); in performFPExtendCombine()
24969 EVT VT = N->getValueType(0); in performFPExtendCombine()
24997 return SDValue(N, 0); // Return N so it doesn't get rechecked! in performFPExtendCombine()
25005 EVT VT = N->getValueType(0); in performBSPExpandForSVE()
25013 SDValue Mask = N->getOperand(0); in performBSPExpandForSVE()
25024 EVT VT = N->getValueType(0); in performDupLane128Combine()
25026 SDValue Insert = N->getOperand(0); in performDupLane128Combine()
25030 if (!Insert.getOperand(0).isUndef()) in performDupLane128Combine()
25035 if (IdxInsert != 0 || IdxDupLane != 0) in performDupLane128Combine()
25042 SDValue Subvec = Bitcast.getOperand(0); in performDupLane128Combine()
25065 SDValue LHS = N->getOperand(0); in tryCombineMULLWithUZP1()
25079 ExtractHigh = LHS.getOperand(0); in tryCombineMULLWithUZP1()
25086 ExtractHigh = RHS.getOperand(0); in tryCombineMULLWithUZP1()
25095 SDValue TruncHighOp = TruncHigh.getOperand(0); in tryCombineMULLWithUZP1()
25104 // t18: v4i16 = extract_subvector t2, Constant:i64<0> in tryCombineMULLWithUZP1()
25116 SDValue ExtractHighSrcVec = ExtractHigh.getOperand(0); in tryCombineMULLWithUZP1()
25143 if (ExtractLowUser->getOperand(0) == ExtractLow) { in tryCombineMULLWithUZP1()
25149 if (ExtractLowUser->getOperand(0).getOpcode() == ISD::TRUNCATE) in tryCombineMULLWithUZP1()
25150 TruncLow = ExtractLowUser->getOperand(0); in tryCombineMULLWithUZP1()
25163 HasFoundMULLow ? TruncLow.getOperand(0) : DAG.getUNDEF(UZP1VT); in tryCombineMULLWithUZP1()
25190 return SDValue(N, 0); in tryCombineMULLWithUZP1()
25212 // t35: i32 = extract_vector_elt t34, Constant:i64<0> in performScalarToVectorCombine()
25217 // t39: v2i32 = extract_subvector t34, Constant:i64<0> in performScalarToVectorCombine()
25222 EVT VT = N->getValueType(0); in performScalarToVectorCombine()
25226 SDValue ZEXT = N->getOperand(0); in performScalarToVectorCombine()
25230 SDValue EXTRACT_VEC_ELT = ZEXT.getOperand(0); in performScalarToVectorCombine()
25238 SDValue UADDLV = EXTRACT_VEC_ELT.getOperand(0); in performScalarToVectorCombine()
25241 UADDLV.getOperand(0).getValueType() != MVT::v8i8) in performScalarToVectorCombine()
25248 DAG.getConstant(0, DL, MVT::i64)); in performScalarToVectorCombine()
25291 APInt::getAllOnes(N->getValueType(0).getScalarSizeInBits()); in PerformDAGCombine()
25293 APInt::getAllOnes(N->getValueType(0).getVectorNumElements()); in PerformDAGCombine()
25296 SDValue(N, 0), DemandedBits, DemandedElts, DCI)) in PerformDAGCombine()
25573 N->getOperand(0), DAG.getConstant(Register, DL, MVT::i64)); in PerformDAGCombine()
25575 AArch64ISD::CSINC, DL, MVT::i32, DAG.getConstant(0, DL, MVT::i32), in PerformDAGCombine()
25576 DAG.getConstant(0, DL, MVT::i32), in PerformDAGCombine()
25583 DAG.getVTList(MVT::Other), N->getOperand(0), in PerformDAGCombine()
25587 DAG.getVTList(MVT::Other), N->getOperand(0), in PerformDAGCombine()
25611 if (!N->hasNUsesOfValue(1, 0)) in isUsedByReturnOnly()
25622 TCChain = Copy->getOperand(0); in isUsedByReturnOnly()
25688 if (ValOnlyUser && ValOnlyUser->getValueType(0).isScalableVector() && in getIndexedAddressParts()
25694 Base = Op->getOperand(0); in getIndexedAddressParts()
25705 Offset = DAG.getConstant(RHSC, SDLoc(N), RHS->getValueType(0)); in getIndexedAddressParts()
25760 SDValue Op = N->getOperand(0); in replaceBoolVectorBitcast()
25761 EVT VT = N->getValueType(0); in replaceBoolVectorBitcast()
25769 if (Op.getOpcode() == ISD::CONCAT_VECTORS && !Op.getOperand(0).isUndef()) { in replaceBoolVectorBitcast()
25775 Op = Op.getOperand(0); in replaceBoolVectorBitcast()
25788 SDValue Op = N->getOperand(0); in CustomNonLegalBITCASTResults()
25789 EVT VT = N->getValueType(0); in CustomNonLegalBITCASTResults()
25794 SDValue IdxZero = DAG.getVectorIdxConstant(0, DL); in CustomNonLegalBITCASTResults()
25802 SDValue Op = N->getOperand(0); in ReplaceBITCASTResults()
25803 EVT VT = N->getValueType(0); in ReplaceBITCASTResults()
25854 EVT VT = N->getValueType(0); in ReplaceAddWithADDP()
25862 SDValue X = N->getOperand(0); in ReplaceAddWithADDP()
25865 Shuf = dyn_cast<ShuffleVectorSDNode>(N->getOperand(0)); in ReplaceAddWithADDP()
25871 if (Shuf->getOperand(0) != X || !Shuf->getOperand(1)->isUndef()) in ReplaceAddWithADDP()
25874 // Check the mask is 1,0,3,2,5,4,... in ReplaceAddWithADDP()
25876 for (int I = 0, E = Mask.size(); I < E; I++) in ReplaceAddWithADDP()
25877 if (Mask[I] != (I % 2 == 0 ? I + 1 : I - 1)) in ReplaceAddWithADDP()
25888 for (unsigned I = 0, E = VT.getVectorNumElements() / 2; I < E; I++) { in ReplaceAddWithADDP()
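
The mask 1,0,3,2,5,4,... checked in ReplaceAddWithADDP swaps each even/odd pair, so x + shuffle(x) holds every pairwise sum in both lanes of its pair, which is exactly what ADDP computes; a scalar check as a standalone sketch:

    #include <cassert>

    int main() {
      int x[4] = {3, 5, -2, 7};
      for (int i = 0; i < 4; ++i) {
        int swapped = x[i % 2 == 0 ? i + 1 : i - 1];    // mask 1,0,3,2
        assert(x[i] + swapped == x[i & ~1] + x[i | 1]); // the pair's ADDP sum
      }
      return 0;
    }
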
25906 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); in ReplaceReductionResults()
25907 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); in ReplaceReductionResults()
25915 SDValue In = N->getOperand(0); in ReplaceExtractSubVectorResults()
25923 EVT VT = N->getValueType(0); in ReplaceExtractSubVectorResults()
25937 if ((Index != 0) && (Index != ResEC.getKnownMinValue())) in ReplaceExtractSubVectorResults()
25940 unsigned Opcode = (Index == 0) ? AArch64ISD::UUNPKLO : AArch64ISD::UUNPKHI; in ReplaceExtractSubVectorResults()
25943 SDValue Half = DAG.getNode(Opcode, DL, ExtendedHalfVT, N->getOperand(0)); in ReplaceExtractSubVectorResults()
25959 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); in createGPRPairNode()
25966 assert(N->getValueType(0) == MVT::i128 && in ReplaceCMP_SWAP_128Results()
25977 N->getOperand(0), // Chain in in ReplaceCMP_SWAP_128Results()
26007 SDValue(CmpSwap, 0)); in ReplaceCMP_SWAP_128Results()
26009 SDValue(CmpSwap, 0)); in ReplaceCMP_SWAP_128Results()
26039 New.first, New.second, N->getOperand(0)}; in ReplaceCMP_SWAP_128Results()
26046 SDValue(CmpSwap, 0), SDValue(CmpSwap, 1))); in ReplaceCMP_SWAP_128Results()
26137 assert(N->getValueType(0) == MVT::i128 && in ReplaceATOMIC_LOAD_128Results()
26144 const SDValue &Chain = N->getOperand(0); in ReplaceATOMIC_LOAD_128Results()
26166 std::swap(Ops[0], Ops[1]); in ReplaceATOMIC_LOAD_128Results()
26174 SDValue Lo = SDValue(AtomicInst, 0), Hi = SDValue(AtomicInst, 1); in ReplaceATOMIC_LOAD_128Results()
26195 Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG)); in ReplaceNodeResults()
26204 if (SDValue Result = LowerCTPOP_PARITY(SDValue(N, 0), DAG)) in ReplaceNodeResults()
26226 if (useSVEForFixedLengthVectorVT(SDValue(N, 0).getValueType())) in ReplaceNodeResults()
26228 LowerToPredicatedOp(SDValue(N, 0), DAG, AArch64ISD::MULHS_PRED)); in ReplaceNodeResults()
26231 if (useSVEForFixedLengthVectorVT(SDValue(N, 0).getValueType())) in ReplaceNodeResults()
26233 LowerToPredicatedOp(SDValue(N, 0), DAG, AArch64ISD::MULHU_PRED)); in ReplaceNodeResults()
26239 assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion"); in ReplaceNodeResults()
26246 assert(N->getValueType(0) != MVT::i128 && in ReplaceNodeResults()
26280 Result.getValue(0), Result.getValue(1)); in ReplaceNodeResults()
26292 if (SDValue(N, 0).getValueType() == MVT::i128) { in ReplaceNodeResults()
26306 unsigned FirstRes = DAG.getDataLayout().isBigEndian() ? 1 : 0; in ReplaceNodeResults()
26325 EVT VT = N->getValueType(0); in ReplaceNodeResults()
26328 static_cast<Intrinsic::ID>(N->getConstantOperandVal(0)); in ReplaceNodeResults()
26388 assert(N->getValueType(0) == MVT::i128 && in ReplaceNodeResults()
26390 SDValue Chain = N->getOperand(0); in ReplaceNodeResults()
26397 // Sysregs are not endian. Result.getValue(0) always contains the lower half in ReplaceNodeResults()
26400 Result.getValue(0), Result.getValue(1)); in ReplaceNodeResults()
26663 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo"); in emitLoadLinked()
26680 0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy)); in emitLoadLinked()
26723 Val, Stxr->getFunctionType()->getParamType(0)), in emitStoreConditional()
26756 IRB.getPtrTy(0)); in UseTlsOffset()
26764 return UseTlsOffset(IRB, 0x28); in getIRStackGuard()
26769 return UseTlsOffset(IRB, -0x10); in getIRStackGuard()
26788 F->addParamAttr(0, Attribute::AttrKind::InReg); in insertSSPDeclarations()
26815 return UseTlsOffset(IRB, 0x48); in getSafeStackPointerLocation()
26820 return UseTlsOffset(IRB, -0x8); in getSafeStackPointerLocation()
26955 MachineOperand &Target = MBBI->getOperand(0); in EmitKCFICheck()
26986 for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) { in finalizeLowering()
27036 unsigned AdditionalCost = 0; in shouldLocalize()
27041 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); in shouldLocalize()
27064 Register Reg = MI.getOperand(0).getReg(); in shouldLocalize()
27093 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) in fallBackToDAGISel()
27210 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in convertToScalableVector()
27221 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in convertFromScalableVector()
27277 auto Op2 = DAG.getConstant(0, DL, ContainerVT); in convertFixedMaskToScalableVector()
27310 PassThru = DAG.getConstant(0, DL, ContainerVT); in LowerFixedLengthVectorMLoadToSVE()
27312 PassThru = DAG.getConstantFP(0, DL, ContainerVT); in LowerFixedLengthVectorMLoadToSVE()
27352 NewValue, DAG.getTargetConstant(0, DL, MVT::i64), in LowerFixedLengthVectorStoreToSVE()
27398 SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0)); in LowerFixedLengthVectorIntDivideToSVE()
27406 DAG.getConstant(0, dl, ContainerVT), Res); in LowerFixedLengthVectorIntDivideToSVE()
27423 SDValue Op0 = DAG.getNode(ExtendOpcode, dl, WideVT, Op.getOperand(0)); in LowerFixedLengthVectorIntDivideToSVE()
27431 SDValue IdxZero = DAG.getConstant(0, dl, MVT::i64); in LowerFixedLengthVectorIntDivideToSVE()
27442 auto [Op0LoExt, Op0HiExt] = HalveAndExtendVector(Op.getOperand(0)); in LowerFixedLengthVectorIntDivideToSVE()
27457 SDValue Val = Op.getOperand(0); in LowerFixedLengthVectorIntExtendToSVE()
27493 SDValue Val = Op.getOperand(0); in LowerFixedLengthVectorTruncateToSVE()
27526 EVT InVT = Op.getOperand(0).getValueType(); in LowerFixedLengthExtractVectorElt()
27531 SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0)); in LowerFixedLengthExtractVectorElt()
27542 EVT InVT = Op.getOperand(0).getValueType(); in LowerFixedLengthInsertVectorElt()
27544 SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op->getOperand(0)); in LowerFixedLengthInsertVectorElt()
27644 SDValue AccOp = ScalarOp.getOperand(0); in LowerVECREDUCE_SEQ_FADD()
27656 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in LowerVECREDUCE_SEQ_FADD()
27672 SDValue Op = ReduceOp.getOperand(0); in LowerPredReductionToSVE()
27716 SDValue VecOp = ScalarOp.getOperand(0); in LowerReductionToSVE()
27736 Rdx, DAG.getConstant(0, DL, MVT::i64)); in LowerReductionToSVE()
27758 EVT MaskVT = Op.getOperand(0).getValueType(); in LowerFixedLengthVectorSelectToSVE()
27760 auto Mask = convertToScalableVector(DAG, MaskContainerVT, Op.getOperand(0)); in LowerFixedLengthVectorSelectToSVE()
27773 EVT InVT = Op.getOperand(0).getValueType(); in LowerFixedLengthVectorSetccToSVE()
27781 auto Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0)); in LowerFixedLengthVectorSetccToSVE()
27798 auto SrcOp = Op.getOperand(0); in LowerFixedLengthBitcastToSVE()
27817 auto SrcOp1 = Op.getOperand(0); in LowerFixedLengthConcatVectorsToSVE()
27825 for (unsigned I = 0; I < NumOperands; I += 2) in LowerFixedLengthConcatVectorsToSVE()
27850 SDValue Val = Op.getOperand(0); in LowerFixedLengthFPExtendToSVE()
27875 SDValue Val = Op.getOperand(0); in LowerFixedLengthFPRoundToSVE()
27903 SDValue Val = Op.getOperand(0); in LowerFixedLengthIntToFPToSVE()
27943 SDValue Even = DAG.getNode(AArch64ISD::UZP1, DL, OpVT, Op.getOperand(0), in LowerVECTOR_DEINTERLEAVE()
27945 SDValue Odd = DAG.getNode(AArch64ISD::UZP2, DL, OpVT, Op.getOperand(0), in LowerVECTOR_DEINTERLEAVE()
27957 SDValue Lo = DAG.getNode(AArch64ISD::ZIP1, DL, OpVT, Op.getOperand(0), in LowerVECTOR_INTERLEAVE()
27959 SDValue Hi = DAG.getNode(AArch64ISD::ZIP2, DL, OpVT, Op.getOperand(0), in LowerVECTOR_INTERLEAVE()
27987 SDValue Zero = DAG.getConstant(0, DL, MVT::i64); in LowerVECTOR_HISTOGRAM()
28033 SDValue Val = Op.getOperand(0); in LowerFixedLengthFPToIntToSVE()
28083 EVT VTOp1 = Op.getOperand(0).getValueType(); in GenerateFixedLengthSVETBL()
28106 if (Index < 0) in GenerateFixedLengthSVETBL()
28107 Index = 0; in GenerateFixedLengthSVETBL()
28120 AddRuntimeVLMask.push_back(DAG.getConstant(0, DL, MVT::i64)); in GenerateFixedLengthSVETBL()
28133 for (unsigned i = 0; i < IndexLen - ElementsPerVectorReg; ++i) { in GenerateFixedLengthSVETBL()
28136 AddRuntimeVLMask.push_back(DAG.getConstant(0, DL, MVT::i64)); in GenerateFixedLengthSVETBL()
28186 SDValue Op1 = Op.getOperand(0); in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28200 unsigned Lane = std::max(0, SVN->getSplatIndex()); in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28256 WhichResult == 0) in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28261 unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28266 if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult == 0) in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28271 unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2; in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28304 WhichResult != 0) in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28309 unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28314 if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult) && WhichResult != 0) in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28319 unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2; in LowerFixedLengthVECTOR_SHUFFLEToSVE()
28396 SDValue ShiftR = Op->getOperand(0); in SimplifyDemandedBitsForTargetNode()
28422 return TLO.CombineTo(Op, ShiftR->getOperand(0)); in SimplifyDemandedBitsForTargetNode()
28426 SDValue Op0 = Op.getOperand(0); in SimplifyDemandedBitsForTargetNode()
28470 Op.getOperand(0).getOpcode() == AArch64ISD::DUP) || in isTargetCanonicalConstantNode()
28528 auto *LowerSplitA = B.CreateExtractVector(HalfTy, InputA, B.getInt64(0)); in createComplexDeinterleavingIR()
28529 auto *LowerSplitB = B.CreateExtractVector(HalfTy, InputB, B.getInt64(0)); in createComplexDeinterleavingIR()
28537 LowerSplitAcc = B.CreateExtractVector(HalfTy, Accumulator, B.getInt64(0)); in createComplexDeinterleavingIR()
28547 B.getInt64(0)); in createComplexDeinterleavingIR()
28664 assert(RegisterVT.getFixedSizeInBits() % 128 == 0 && "Unexpected size!"); in getVectorTypeBreakdownForCallingConv()
28736 EVT VT = N->getValueType(0); in verifyTargetSDNode()
28737 EVT OpVT = N->getOperand(0).getValueType(); in verifyTargetSDNode()
28755 EVT VT = N->getValueType(0); in verifyTargetSDNode()
28756 EVT Op0VT = N->getOperand(0).getValueType(); in verifyTargetSDNode()