Lines Matching +full:128 +full:mib

All hits below fall in LLVM's AArch64LegalizerInfo.cpp (the GlobalISel legalizer for AArch64). The leading number on each hit is that file's own line number, the trailing "in Function()" names the enclosing function, and "local" marks hits that declare a local variable.

51   const LLT s128 = LLT::scalar(128);  in AArch64LegalizerInfo()
69 std::initializer_list<LLT> PackedVectorAllTypeList = {/* Begin 128bit types */ in AArch64LegalizerInfo()
72 /* End 128bit types */ in AArch64LegalizerInfo()
113 // Maximum: sN * k = 128 in AArch64LegalizerInfo()
341 // 128 bit base sizes in AArch64LegalizerInfo()
352 // 128 bit base sizes in AArch64LegalizerInfo()
435 // Maximum: sN * k = 128 in AArch64LegalizerInfo()
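The constructor hits above (lines 51 through 435) come from the legality-rule declarations. As a point of reference, here is a minimal sketch of how such rules are written with the getActionDefinitionsBuilder API; the G_ADD opcode and the exact clamps are illustrative, chosen only to show the "sN * k = 128" ceiling the two comments mention. All code sketches in this listing assume the usual context of a LegalizerInfo subclass constructor or legalize helper, with that file's normal includes.

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT v4s32 = LLT::fixed_vector(4, 32);
    const LLT v2s64 = LLT::fixed_vector(2, 64);

    getActionDefinitionsBuilder(TargetOpcode::G_ADD)
        .legalFor({s32, s64, v4s32, v2s64}) // each type fits one 128-bit register
        .clampScalar(0, s32, s64)           // round stray scalar widths into range
        .clampMaxNumElements(0, s64, 2);    // keep sN * k <= 128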
586 if (DstSize < 8 || DstSize >= 128 || !isPowerOf2_32(DstSize)) in AArch64LegalizerInfo()
592 // the source type is below 128 bits. We shouldn't be allowing anything in AArch64LegalizerInfo()
631 return DstTy.isVector() && SrcTy.getSizeInBits() > 128 && in AArch64LegalizerInfo()
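Hits like the two at 586 and 631 are bodies of custom legality predicates over a LegalityQuery. A hedged sketch of the enclosing shape; attaching it to G_EXTRACT is an assumption, since the search does not show which rule owns the lambda:

    getActionDefinitionsBuilder(TargetOpcode::G_EXTRACT)
        .customIf([](const LegalityQuery &Query) {
          const LLT DstTy = Query.Types[0];
          const LLT SrcTy = Query.Types[1];
          // Mirrors the predicate at line 631: a vector destination carved
          // out of a source wider than 128 bits.
          return DstTy.isVector() && SrcTy.getSizeInBits() > 128 &&
                 DstTy.getSizeInBits() <= 128;
        });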
796 // Same for 128-bit width type, except they are on the FPR bank. in AArch64LegalizerInfo()
833 return Query.Types[0].getSizeInBits() == 128 && in AArch64LegalizerInfo()
871 case 128: in AArch64LegalizerInfo()
903 // cause the total vec size to be > 128b. in AArch64LegalizerInfo()
1028 return Query.Types[0].getSizeInBits() <= 128 && in AArch64LegalizerInfo()
1079 // TODO: Fix suboptimal codegen for 128+ bit types. in AArch64LegalizerInfo()
1081 return SrcTy.isScalar() && SrcTy.getSizeInBits() < 128; in AArch64LegalizerInfo()
1112 // clamp to 128 bit vectors then to 64bit vectors to produce a cascade of in AArch64LegalizerInfo()
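The "cascade" comment at 1112 refers to chaining per-element-type clamps so an oversized vector is halved step by step, first toward a 128-bit vector, then toward a 64-bit one. An illustrative sketch (the G_AND opcode and the type set are stand-ins, not the file's actual rule):

    const LLT v8s8 = LLT::fixed_vector(8, 8);
    const LLT v16s8 = LLT::fixed_vector(16, 8);
    const LLT v4s16 = LLT::fixed_vector(4, 16);
    const LLT v8s16 = LLT::fixed_vector(8, 16);

    getActionDefinitionsBuilder(TargetOpcode::G_AND)
        .legalFor({v16s8, v8s8, v8s16, v4s16})
        // Anything wider than 128 bits is split toward the 128-bit form;
        // each split halves the type, producing the cascade of smaller ops.
        .clampNumElements(0, v8s8, v16s8)
        .clampNumElements(0, v4s16, v8s16);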
1417 (DstTy.getSizeInBits() != 64 && DstTy.getSizeInBits() != 128)) in legalizeICMP()
1525 MachineIRBuilder MIB(MI); in legalizeIntrinsic() local
1526 MIB.buildLoad(Val, MI.getOperand(2), in legalizeIntrinsic()
1530 MIB.buildStore(Val, MI.getOperand(1), in legalizeIntrinsic()
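The pair of hits at 1526 and 1530 replace an intrinsic with a plain load/store copy through a temporary virtual register. A sketch of the full idiom around them; the 128-bit value type, the access size, and the alignment are illustrative assumptions:

    MachineIRBuilder MIB(MI);
    MachineFunction &MF = MIB.getMF();
    Register Val = MIB.getMRI()->createGenericVirtualRegister(LLT::scalar(128));
    MIB.buildLoad(Val, MI.getOperand(2),
                  *MF.getMachineMemOperand(MachinePointerInfo(),
                                           MachineMemOperand::MOLoad,
                                           /*Size=*/16, Align(8)));
    MIB.buildStore(Val, MI.getOperand(1),
                   *MF.getMachineMemOperand(MachinePointerInfo(),
                                            MachineMemOperand::MOStore,
                                            /*Size=*/16, Align(8)));
    MI.eraseFromParent();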
1538 MachineIRBuilder &MIB = Helper.MIRBuilder; in legalizeIntrinsic() local
1539 MIB.buildConstant(MI.getOperand(0).getReg(), 0); in legalizeIntrinsic()
1547 MachineIRBuilder MIB(MI); in legalizeIntrinsic() local
1549 Register ExtValueReg = MIB.buildAnyExt(LLT::scalar(64), Value).getReg(0); in legalizeIntrinsic()
1554 MachineIRBuilder MIB(MI); in legalizeIntrinsic() local
1567 MIB.buildInstr(AArch64::G_AARCH64_PREFETCH).addImm(PrfOp).add(AddrVal); in legalizeIntrinsic()
1577 MachineIRBuilder MIB(MI); in legalizeIntrinsic() local
1578 MachineRegisterInfo &MRI = *MIB.getMRI(); in legalizeIntrinsic()
1595 MIB.setInsertPt(MIB.getMBB(), ++MIB.getInsertPt()); in legalizeIntrinsic()
1596 MIB.buildExtOrTrunc(IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT, in legalizeIntrinsic()
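Lines 1595-1596 show a common legalization idiom: step the builder's insertion point past MI, then extend MI's now-too-narrow result back to its original width. A hedged sketch with OldDst, NewDst, and IsSigned as illustrative stand-ins for the file's locals:

    Register OldDst = MI.getOperand(0).getReg();
    Register NewDst = MRI.createGenericVirtualRegister(LLT::scalar(32));
    MI.getOperand(0).setReg(NewDst);                    // MI defines the narrow reg
    MIB.setInsertPt(MIB.getMBB(), ++MIB.getInsertPt()); // insert after MI
    MIB.buildExtOrTrunc(IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT,
                        OldDst, NewDst);                // re-widen for the old users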
1603 MachineIRBuilder MIB(MI); in legalizeIntrinsic() local
1608 MIB.buildInstr(Opc, {MI.getOperand(0)}, {MI.getOperand(2)}); in legalizeIntrinsic()
1615 MachineIRBuilder MIB(MI); in legalizeIntrinsic() local
1616 MachineRegisterInfo &MRI = *MIB.getMRI(); in legalizeIntrinsic()
1635 MIB.buildInstr(Opc, {MidTy}, {SrcReg})->getOperand(0).getReg(); in legalizeIntrinsic()
1637 MIB.buildConstant(LLT::scalar(64), 0)->getOperand(0).getReg(); in legalizeIntrinsic()
1638 Register ExtReg = MIB.buildInstr(AArch64::G_EXTRACT_VECTOR_ELT, {ExtTy}, in legalizeIntrinsic()
1643 MIB.buildTrunc(DstReg, ExtReg); in legalizeIntrinsic()
1645 MIB.buildCopy(DstReg, ExtReg); in legalizeIntrinsic()
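Hits 1635-1645 are the tail of a reduction-style lowering: run the operation at a wider type, extract lane 0, then narrow or copy into the destination. A hedged reconstruction, with Opc, MidTy, ExtTy, SrcReg, and DstReg standing in for the file's locals:

    Register Rdx =
        MIB.buildInstr(Opc, {MidTy}, {SrcReg})->getOperand(0).getReg();
    Register Zero =
        MIB.buildConstant(LLT::scalar(64), 0)->getOperand(0).getReg();
    Register ExtReg = MIB.buildInstr(AArch64::G_EXTRACT_VECTOR_ELT, {ExtTy},
                                     {Rdx, Zero})
                          .getReg(0);
    if (MRI.getType(DstReg).getSizeInBits() < ExtTy.getSizeInBits())
      MIB.buildTrunc(DstReg, ExtReg); // destination narrower than the element
    else
      MIB.buildCopy(DstReg, ExtReg);  // same width, plain copy
    MI.eraseFromParent();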
1659 MachineIRBuilder MIB(MI); in legalizeIntrinsic() local
1661 MIB.buildSMax(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3)); in legalizeIntrinsic()
1663 MIB.buildSMin(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3)); in legalizeIntrinsic()
1665 MIB.buildUMax(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3)); in legalizeIntrinsic()
1667 MIB.buildUMin(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3)); in legalizeIntrinsic()
1669 MIB.buildInstr(TargetOpcode::G_FMAXIMUM, {MI.getOperand(0)}, in legalizeIntrinsic()
1672 MIB.buildInstr(TargetOpcode::G_FMINIMUM, {MI.getOperand(0)}, in legalizeIntrinsic()
1675 MIB.buildInstr(TargetOpcode::G_FMAXNUM, {MI.getOperand(0)}, in legalizeIntrinsic()
1678 MIB.buildInstr(TargetOpcode::G_FMINNUM, {MI.getOperand(0)}, in legalizeIntrinsic()
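Hits 1659-1678 are the arms of a single dispatch from NEON min/max intrinsics onto generic opcodes. A hedged reconstruction of its shape; the intrinsic IDs shown are the natural candidates, not verified against the file:

    switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
    case Intrinsic::aarch64_neon_smax:
      MIB.buildSMax(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3));
      break;
    case Intrinsic::aarch64_neon_smin:
      MIB.buildSMin(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3));
      break;
    case Intrinsic::aarch64_neon_umax:
      MIB.buildUMax(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3));
      break;
    case Intrinsic::aarch64_neon_umin:
      MIB.buildUMin(MI.getOperand(0), MI.getOperand(2), MI.getOperand(3));
      break;
    case Intrinsic::aarch64_neon_fmax:
      MIB.buildInstr(TargetOpcode::G_FMAXIMUM, {MI.getOperand(0)},
                     {MI.getOperand(2), MI.getOperand(3)});
      break;
    // ...fmin, fmaxnm, and fminnm map the same way onto G_FMINIMUM,
    // G_FMAXNUM, and G_FMINNUM.
    }
    MI.eraseFromParent();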
1747 if (ValTy == LLT::scalar(128)) { in legalizeLoadStore()
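The hit at 1747 special-cases 128-bit scalar values in legalizeLoadStore. One common way to legalize such an access, shown here as an illustrative sketch for a non-atomic store (ValReg, BaseAddr, LoMMO, and HiMMO are stand-ins), is to split it into two 64-bit halves:

    LLT s64 = LLT::scalar(64);
    auto Parts = MIB.buildUnmerge(s64, ValReg);         // {lo, hi}
    MIB.buildStore(Parts.getReg(0), BaseAddr, *LoMMO);  // bytes [0, 8)
    auto Off = MIB.buildConstant(s64, 8);
    auto HiAddr = MIB.buildPtrAdd(MRI.getType(BaseAddr), BaseAddr, Off);
    MIB.buildStore(Parts.getReg(1), HiAddr, *HiMMO);    // bytes [8, 16)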
1890 // For 128 bit vector popcounts, we lower to the following sequence: in legalizeCTPOP()
1910 if (ST->hasCSSC() && Ty.isScalar() && Size == 128) { in legalizeCTPOP()
1934 LLT VTy = Size == 128 ? LLT::fixed_vector(16, 8) : LLT::fixed_vector(8, 8); in legalizeCTPOP()
1936 … assert((Size == 32 || Size == 64 || Size == 128) && "Expected only 32, 64, or 128 bit scalars!"); in legalizeCTPOP()
2006 if (Ty.isScalar() && (Size == 64 || Size == 128)) in legalizeCTPOP()
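The comment at 1890 introduces a lowering sequence that the search result cuts off. Hedged, from the shape of the surrounding hits: the value is bitcast to a byte vector, CNT computes a per-byte popcount, and a chain of UADDLP widening pairwise adds accumulates the bytes back up to the original element width. The GISel side starts out roughly as:

    // Line 1934 appears verbatim above: pick the byte-vector type.
    LLT VTy = Size == 128 ? LLT::fixed_vector(16, 8) : LLT::fixed_vector(8, 8);
    Register Val = MIB.buildBitcast(VTy, SrcReg).getReg(0);
    auto CTPOP = MIB.buildCTPOP(VTy, Val); // cnt.16b / cnt.8b
    // ...followed by uaddlp-style pairwise adds (16b -> 8h -> 4s -> 2d)
    // until the counts reach the requested element width.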
2026 // We have 128-bit CASP instructions taking XSeqPair registers, which are in legalizeAtomicCmpxchg128()
2029 // 128-bit known-regclass one with code like this: in legalizeAtomicCmpxchg128()
2053 LLT s128 = LLT::scalar(128); in legalizeAtomicCmpxchg128()
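The comment at 2026-2029 describes bracketing the 128-bit CASP so the register allocator sees a single known-regclass s128 value. A hedged sketch of that bracketing (the register names and the plain CASPX opcode choice are illustrative; the real code also threads memory operands and atomic orderings):

    LLT s64 = LLT::scalar(64);
    LLT s128 = LLT::scalar(128);
    auto Desired = MIB.buildMergeLikeInstr(s128, {DesiredLo, DesiredHi});
    auto New = MIB.buildMergeLikeInstr(s128, {NewLo, NewHi});
    auto CAS = MIB.buildInstr(AArch64::CASPX, {s128}, {Desired, New, Addr});
    auto Old = MIB.buildUnmerge(s64, CAS); // old value back as {lo, hi}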
2192 MachineIRBuilder &MIB = Helper.MIRBuilder; in legalizePrefetch() local
2210 MIB.buildInstr(AArch64::G_AARCH64_PREFETCH).addImm(PrfOp).add(AddrVal); in legalizePrefetch()
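Both prefetch hits (1567 and 2210) emit G_AARCH64_PREFETCH with a precomputed PrfOp immediate. A hedged sketch of how such an immediate is assembled from the generic prefetch operands, following the AArch64 PRFM operand encoding (IsWrite, Locality, IsData, and AddrVal are stand-ins read from MI's operands, and the bit positions should be checked against the file):

    bool IsStream = Locality == 0;
    if (Locality != 0)
      Locality = 3 - Locality;         // locality degree inverts the cache level
    unsigned PrfOp = (IsWrite << 4) |  // load vs. store prefetch
                     (!IsData << 3) |  // data vs. instruction cache
                     (Locality << 1) | // target cache level
                     IsStream;         // streaming (non-temporal) hint
    MIB.buildInstr(AArch64::G_AARCH64_PREFETCH).addImm(PrfOp).add(AddrVal);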