Lines matching refs:hasAVX2 (cross-reference search for Subtarget.hasAVX2(); the function names correspond to LLVM's X86 lowering code in X86ISelLowering.cpp)

3358 if (Subtarget.hasAVX2()) in shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd()
4230 } else if (Subtarget.hasAVX2()) { in SplitOpsAndApply()
7113 if (!Subtarget.hasAVX2() && ScalarSize < 32) in EltsFromConsecutiveLoads()
7157 if (!Subtarget.hasAVX2() && in EltsFromConsecutiveLoads()
7358 (SplatBitSize < 32 && Subtarget.hasAVX2())) { in lowerBuildVectorAsBroadcast()
7432 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) { in lowerBuildVectorAsBroadcast()
7442 (CVT == MVT::f16 && Subtarget.hasAVX2()) || in lowerBuildVectorAsBroadcast()
7443 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) { in lowerBuildVectorAsBroadcast()
8249 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) { in LowerToHorizontalOp()
8621 if (Subtarget.hasAVX2()) in createVariablePermute()
9053 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) { in LowerBUILD_VECTOR()
9847 (Subtarget.hasAVX2() && VT.is256BitVector()) || in lowerShuffleWithPSHUFB()
10668 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!"); in lowerShuffleAsBlend()
10683 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!"); in lowerShuffleAsBlend()
10712 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!"); in lowerShuffleAsBlend()
11043 (VT.is256BitVector() && !Subtarget.hasAVX2()) || in lowerShuffleAsByteRotateAndPermute()
11189 if (!Subtarget.hasAVX2() && (!Subtarget.hasAVX() || EltSizeInBits < 32 || in lowerShuffleAsDecomposedShuffleMerge()
12346 assert(Subtarget.hasAVX2() && in lowerShuffleAsTruncBroadcast()
12492 (Subtarget.hasAVX2() && (VT.isInteger() || EltVT == MVT::f16)))) in lowerShuffleAsBroadcast()
12498 unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2()) in lowerShuffleAsBroadcast()
12501 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2(); in lowerShuffleAsBroadcast()
12826 if (Subtarget.hasAVX2()) in lowerV2F64Shuffle()
12906 if (Subtarget.hasAVX2()) in lowerV2I64Shuffle()
13124 if (Subtarget.hasAVX2()) in lowerV4F32Shuffle()
13223 if (Subtarget.hasAVX2()) in lowerV4I32Shuffle()
13994 if (NumEvenDrops == 2 && Subtarget.hasAVX2() && in lowerV8I16Shuffle()
14764 bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef(); in lowerShuffleAsLanePermuteAndPermute()
14900 if (!Subtarget.hasAVX2()) { in lowerShuffleAsLanePermuteAndShuffle()
14960 if (Subtarget.hasAVX2()) in lowerV2X128Shuffle()
15386 if (Subtarget.hasAVX2()) { in lowerShuffleWithUndefHalf()
15417 if (Subtarget.hasAVX2() && EltWidth == 64) in lowerShuffleWithUndefHalf()
15444 if (Subtarget.hasAVX2()) { in lowerShuffleAsRepeatedMaskAndLanePermute()
15618 if (Subtarget.hasAVX2() && VT.is256BitVector()) { in lowerShuffleAsRepeatedMaskAndLanePermute()
15859 if (Subtarget.hasAVX2()) in lowerV4F64Shuffle()
15921 if (!(Subtarget.hasAVX2() && (V1IsInPlace || V2IsInPlace))) in lowerV4F64Shuffle()
15934 if (Subtarget.hasAVX2()) in lowerV4F64Shuffle()
15954 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!"); in lowerV4I64Shuffle()
16077 if (!Subtarget.hasAVX2()) { in lowerV8F32Shuffle()
16129 if (Subtarget.hasAVX2()) { in lowerV8F32Shuffle()
16155 if (Subtarget.hasAVX2() && !Subtarget.hasAVX512()) in lowerV8F32Shuffle()
16169 if (Subtarget.hasAVX2()) in lowerV8F32Shuffle()
16189 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!"); in lowerV8I32Shuffle()
16331 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!"); in lowerV16I16Shuffle()
16454 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!"); in lowerV32I8Shuffle()
16594 if (VT.isInteger() && !Subtarget.hasAVX2()) { in lower256BitShuffle()
17851 if (EltSize < 32 && VT.is256BitVector() && !Subtarget.hasAVX2() && in LowerVSELECT()
17872 if (Subtarget.hasAVX2()) in LowerVSELECT()
18297 (Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) { in LowerINSERT_VECTOR_ELT()
18312 ((Subtarget.hasAVX2() && EltSizeInBits != 8) || in LowerINSERT_VECTOR_ELT()
20844 if (VT == MVT::v8i32 && !Subtarget.hasAVX2()) { in expandFP_TO_UINT_SSE()
29449 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) { in convertShiftLeftToScale()
30040 !Subtarget.hasAVX2())) || in LowerFunnelShift()
30217 if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2())) in LowerRotate()
30259 Subtarget.hasAVX2()) || in LowerRotate()
30406 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) { in LowerRotate()
32130 assert(Subtarget.hasAVX2() && in LowerMGATHER()
34137 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64)) in isVectorShiftByScalarCheap()
34406 if (!Subtarget.hasAVX2()) in isVectorClearMaskLegal()
37995 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) { in matchUnaryPermuteShuffle()
38053 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || in matchUnaryPermuteShuffle()
38101 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || in matchUnaryPermuteShuffle()
38218 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || in matchBinaryShuffle()
38224 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2()) in matchBinaryShuffle()
38351 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || in matchBinaryPermuteShuffle()
38365 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) { in matchBinaryPermuteShuffle()
38548 (RootVT.is256BitVector() && !Subtarget.hasAVX2()); in combineX86ShuffleChain()
38679 !Subtarget.hasAVX2()) { in combineX86ShuffleChain()
38695 !(Subtarget.hasAVX2() && isUndefOrInRange(Mask, 0, 2)) && in combineX86ShuffleChain()
38771 (!MaskVT.is256BitVector() || Subtarget.hasAVX2()); in combineX86ShuffleChain()
38781 if ((Subtarget.hasAVX2() || in combineX86ShuffleChain()
38794 if (Subtarget.hasAVX2()) { in combineX86ShuffleChain()
38989 if (Subtarget.hasAVX2() && in combineX86ShuffleChain()
39148 (RootVT.is256BitVector() && Subtarget.hasAVX2()) || in combineX86ShuffleChain()
40121 if (Ops.size() != 2 || !Subtarget.hasAVX2() || RootSizeInBits != 128 || in combineX86ShufflesRecursively()
40456 if (VT.is256BitVector() && !Subtarget.hasAVX2() && in combineBlendOfPermutes()
40831 assert(Subtarget.hasAVX2() && "Expected AVX2"); in combineTargetShuffle()
41680 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N)) in combineShuffleOfConcatUndef()
43554 checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) { in combineBitcastvxi1()
45407 } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits && in combineToExtendBoolVectorInReg()
45703 if (VT == MVT::v32i8 && !Subtarget.hasAVX2()) in combineVSelectToBLENDV()
46379 (Subtarget.hasAVX2() && EltBitWidth == 64) || in combineSelect()
50916 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break; in foldVectorXorShiftIntoCmp()
52070 if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() && in isHorizontalBinOp()
54429 Subtarget.hasAVX2()) && in combineSetCC()
56105 (Subtarget.hasAVX2() || in combineConcatVectorOps()
56115 (Subtarget.hasAVX2() || in combineConcatVectorOps()
57014 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() && in combineEXTRACT_SUBVECTOR()
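
A recurring pattern across these hits (see the asserts at 15954, 16189, 16331, and 16454, and the integer fallback check at 16594): 256-bit *integer* lowering is gated on AVX2, and AVX1-only targets instead split the operation into two 128-bit halves. Below is a minimal, self-contained sketch of that gating. The Subtarget struct and chooseV4I64Lowering helper are hypothetical stand-ins for illustration, not the real llvm::X86Subtarget API.

    // Sketch only: mirrors the shape of checks like the one guarding
    // lowerV4I64Shuffle() above. AVX2 can keep a v4i64 operation whole;
    // an AVX1-only target must split it into 128-bit halves.
    #include <cstdio>

    struct Subtarget {               // hypothetical stand-in, not LLVM's
      bool HasAVX2 = false;
      bool hasAVX2() const { return HasAVX2; }
    };

    const char *chooseV4I64Lowering(const Subtarget &ST) {
      if (ST.hasAVX2())
        return "single 256-bit integer shuffle (e.g. VPERMQ)";
      return "split into two 128-bit halves, shuffle, recombine";
    }

    int main() {
      Subtarget AVX1Only;           // hasAVX2() == false
      Subtarget AVX2Target{true};   // hasAVX2() == true
      std::printf("AVX1-only: %s\n", chooseV4I64Lowering(AVX1Only));
      std::printf("AVX2:      %s\n", chooseV4I64Lowering(AVX2Target));
      return 0;
    }

The same shape recurs for v8i32, v16i16, and v32i8: the hasAVX2() predicate selects between a native 256-bit path and a SplitOpsAndApply-style 128-bit fallback (see the match at 4230).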