/freebsd/contrib/llvm-project/llvm/lib/Target/DirectX/ |
H A D | CBufferDataLayout.cpp | 103 if (TypeSize ScalarSize = EltTy->getScalarType()->getPrimitiveSizeInBits()) in getStructLayout() local
104 Offset = alignTo(Offset, ScalarSize >> 3); in getStructLayout()
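A minimal restatement of the alignment step above, with invented names: ScalarSize is a width in bits, so ScalarSize >> 3 converts it to a byte alignment before the llvm::alignTo() call.

#include "llvm/Support/MathExtras.h"
#include <cstdint>

// Align a running cbuffer field offset (in bytes) to the element's byte
// size; llvm::alignTo is the real helper, the wrapper is illustrative.
uint64_t alignFieldOffset(uint64_t Offset, uint64_t ScalarSizeBits) {
  return llvm::alignTo(Offset, ScalarSizeBits >> 3);
}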
|
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGenTypes/ |
H A D | LowLevelType.h | 128 static constexpr LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) { in scalarOrVector() argument
129 assert(ScalarSize <= std::numeric_limits<unsigned>::max() && in scalarOrVector()
131 return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize))); in scalarOrVector()
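A possible call site for the overload above; the widths and counts are made up. With a fixed element count of 1 the result should collapse to a plain scalar LLT (an assumption based on the overload it forwards to); otherwise it yields a vector of s32 elements.

#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/Support/TypeSize.h"

using namespace llvm;

// Build either s32 or <4 x s32> from one entry point.
LLT pickTy(bool WantVector) {
  ElementCount EC = ElementCount::getFixed(WantVector ? 4 : 1);
  return LLT::scalarOrVector(EC, /*ScalarSize=*/32);
}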
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/ |
H A D | AArch64PostLegalizerLowering.cpp | 708 unsigned ScalarSize = ScalarTy.getSizeInBits(); in matchDupLane() local
713 if (ScalarSize == 64) in matchDupLane()
715 else if (ScalarSize == 32) in matchDupLane()
719 if (ScalarSize == 32) in matchDupLane()
721 else if (ScalarSize == 16) in matchDupLane()
725 if (ScalarSize == 8) in matchDupLane()
727 else if (ScalarSize == 16) in matchDupLane()
731 if (ScalarSize == 8) in matchDupLane()
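The matchDupLane() checks above dispatch on the element size in bits (the real code also branches on the containing vector width). A schematic restatement; the enum and function are invented for illustration, only the size comparisons mirror the snippet.

// Pick a DUP-lane flavor from the element size in bits.
enum DupLaneOpcode { DupLane8, DupLane16, DupLane32, DupLane64, NoDup };

DupLaneOpcode pickDupLane(unsigned ScalarSize) {
  switch (ScalarSize) {
  case 64: return DupLane64;
  case 32: return DupLane32;
  case 16: return DupLane16;
  case 8:  return DupLane8;
  default: return NoDup;
  }
}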
|
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/ |
H A D | X86ShuffleDecode.h | 113 void decodeVSHUF64x2FamilyMask(unsigned NumElts, unsigned ScalarSize,
|
H A D | X86ShuffleDecode.cpp | 263 void decodeVSHUF64x2FamilyMask(unsigned NumElts, unsigned ScalarSize, in decodeVSHUF64x2FamilyMask() argument
266 unsigned NumElementsInLane = 128 / ScalarSize; in decodeVSHUF64x2FamilyMask()
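A sketch of the lane decode implied by the snippet: each selector in the immediate picks one 128-bit lane, and a lane holds 128 / ScalarSize elements. The loop structure is an assumption for illustration, not a copy of the LLVM source.

#include <vector>

std::vector<int> decodeVSHUF64x2Sketch(unsigned NumElts, unsigned ScalarSize,
                                       unsigned Imm) {
  unsigned NumElementsInLane = 128 / ScalarSize; // 2 for 64-bit, 4 for 32-bit
  unsigned NumLanes = NumElts / NumElementsInLane;
  std::vector<int> Mask;
  for (unsigned L = 0; L != NumLanes; ++L) {
    unsigned SrcLane = Imm % NumLanes; // low selector bits pick a source lane
    Imm /= NumLanes;                   // consume the bits just used
    for (unsigned I = 0; I != NumElementsInLane; ++I)
      Mask.push_back(SrcLane * NumElementsInLane + I);
  }
  return Mask;
}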
|
H A D | X86InstComments.cpp | 239 static unsigned getRegOperandNumElts(const MCInst *MI, unsigned ScalarSize, in getRegOperandNumElts() argument
242 return getVectorRegSize(OpReg) / ScalarSize; in getRegOperandNumElts()
|
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/ |
H A D | X86InterleavedAccess.cpp | 230 unsigned ScalarSize = VT.getVectorElementType().getScalarSizeInBits() * 2; in scaleVectorType() local
231 return MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), in scaleVectorType()
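What scaleVectorType() appears to compute: double the element width and, to keep the total bit width constant, halve the element count. The second getVectorVT() argument is truncated in the index above, so the halved count here is an inference, not a quote.

#include "llvm/CodeGenTypes/MachineValueType.h"

// e.g. v16i8 -> v8i16, v8i16 -> v4i32 (assumed completion).
llvm::MVT scaleVectorTypeSketch(llvm::MVT VT) {
  unsigned ScalarSize = VT.getVectorElementType().getScalarSizeInBits() * 2;
  return llvm::MVT::getVectorVT(llvm::MVT::getIntegerVT(ScalarSize),
                                VT.getVectorNumElements() / 2);
}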
|
H A D | X86ISelLowering.cpp | 7112 unsigned ScalarSize = std::min(RepeatSize, 64u); in EltsFromConsecutiveLoads() local
7113 if (!Subtarget.hasAVX2() && ScalarSize < 32) in EltsFromConsecutiveLoads()
7118 if (RepeatSize > ScalarSize && SubElems == 1) in EltsFromConsecutiveLoads()
7141 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize) in EltsFromConsecutiveLoads()
7142 : EVT::getFloatingPointVT(ScalarSize); in EltsFromConsecutiveLoads()
7143 if (RepeatSize > ScalarSize) in EltsFromConsecutiveLoads()
7145 RepeatSize / ScalarSize); in EltsFromConsecutiveLoads()
7148 VT.getSizeInBits() / ScalarSize); in EltsFromConsecutiveLoads()
7153 if (RepeatSize > ScalarSize) { in EltsFromConsecutiveLoads()
7197 unsigned ScalarSize = VT.getScalarSizeInBits(); in getConstantVector() local
[all …]
|
H A D | X86TargetTransformInfo.cpp | 5350 unsigned ScalarSize = ValVTy->getScalarSizeInBits(); in getArithmeticReductionCost() local
5354 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits()) in getArithmeticReductionCost()
5374 unsigned Size = NumVecElts * ScalarSize; in getArithmeticReductionCost()
5545 unsigned ScalarSize = ValTy->getScalarSizeInBits(); in getMinMaxReductionCost() local
5550 ScalarSize != MTy.getScalarSizeInBits()) in getMinMaxReductionCost()
5557 unsigned Size = NumVecElts * ScalarSize; in getMinMaxReductionCost()
|
/freebsd/contrib/llvm-project/llvm/lib/Target/SystemZ/ |
H A D | SystemZTargetTransformInfo.cpp | 1298 unsigned ScalarSize = VTy->getScalarSizeInBits(); in getVectorIntrinsicInstrCost() local
1300 if (ScalarSize > SystemZ::VectorBits) in getVectorIntrinsicInstrCost()
1305 unsigned LastVectorHandling = (ScalarSize < 32) ? 3 : 2; in getVectorIntrinsicInstrCost()
|
/freebsd/contrib/llvm-project/llvm/lib/Transforms/Vectorize/ |
H A D | VectorCombine.cpp | 162 uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits(); in canWidenLoad() local
164 if (!ScalarSize || !MinVectorSize || MinVectorSize % ScalarSize != 0 || in canWidenLoad()
165 ScalarSize % 8 != 0) in canWidenLoad()
190 uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits(); in vectorizeLoadInsert() local
200 unsigned MinVecNumElts = MinVectorSize / ScalarSize; in vectorizeLoadInsert()
221 uint64_t ScalarSizeInBytes = ScalarSize / 8; in vectorizeLoadInsert()
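A plain restatement of the guard in canWidenLoad(): widening is viable only when the scalar width is a nonzero multiple of a byte that evenly divides the target's minimum vector register size. The free function is an illustration, not the LLVM source.

#include <cstdint>

bool canWidenLoadSketch(uint64_t ScalarSize, uint64_t MinVectorSize) {
  return ScalarSize != 0 && MinVectorSize != 0 &&
         MinVectorSize % ScalarSize == 0 && // whole number of elements fit
         ScalarSize % 8 != 0 ? false : true; // byte-sized scalars only
}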
|
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/MIRParser/ |
H A D | MIParser.cpp | 1927 auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue(); in parseLowLevelType() local
1928 if (ScalarSize) { in parseLowLevelType()
1929 if (!verifyScalarSize(ScalarSize)) in parseLowLevelType()
1931 Ty = LLT::scalar(ScalarSize); in parseLowLevelType()
1990 auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue(); in parseLowLevelType() local
1991 if (!verifyScalarSize(ScalarSize)) in parseLowLevelType()
1993 Ty = LLT::scalar(ScalarSize); in parseLowLevelType()
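The MIParser lines drop the leading type letter from a MIR type token such as "s64" and read the remainder as the scalar width in bits. A simplified standard-library sketch with no error handling:

#include <cstdint>
#include <string>

// "s64" -> 64; the real parser uses APSInt on the token range and then
// validates the width with verifyScalarSize().
uint64_t parseScalarWidth(const std::string &Token) {
  return std::stoull(Token.substr(1));
}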
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/ |
H A D | AMDGPUCodeGenPrepare.cpp | 1559 unsigned ScalarSize = Ty->getScalarSizeInBits(); in visitBinaryOperator() local
1565 ScalarSize <= 64 && in visitBinaryOperator()
1580 if (ScalarSize <= 32) { in visitBinaryOperator()
1603 if (ScalarSize <= 32) in visitBinaryOperator()
|
H A D | SIISelLowering.cpp | 12024 auto ScalarSize = Op.getScalarValueSizeInBits(); in calculateByteProvider() local
12025 if (ScalarSize < 32) in calculateByteProvider()
12026 Index = ScalarSize == 8 ? VecIdx : VecIdx * 2 + Index; in calculateByteProvider()
12027 return calculateSrcByte(ScalarSize >= 32 ? Op : Op.getOperand(0), in calculateByteProvider()
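Restatement of the index arithmetic in calculateByteProvider(): for scalars narrower than 32 bits, the byte index into the 32-bit container is VecIdx for 8-bit elements and VecIdx * 2 + Index for 16-bit ones. The standalone function is illustrative, not the LLVM source.

unsigned containerByteIndex(unsigned ScalarSize, unsigned VecIdx,
                            unsigned Index) {
  if (ScalarSize >= 32)
    return Index; // 32-bit and wider scalars are addressed directly
  return ScalarSize == 8 ? VecIdx : VecIdx * 2 + Index;
}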
|
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/ |
H A D | Utils.cpp | 1524 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits(); in isConstantOrConstantSplatVector() local
1525 return APInt(ScalarSize, *MaybeCst, true); in isConstantOrConstantSplatVector()
|
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ |
H A D | SelectionDAG.cpp | 3186 unsigned ScalarSize = Op.getOperand(0).getScalarValueSizeInBits(); in computeKnownBits() local
3187 assert(ScalarSize * Op.getNumOperands() == BitWidth && in computeKnownBits()
3190 Known.insertBits(computeKnownBits(SrcOp, Depth + 1), ScalarSize * I); in computeKnownBits()
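A model of the stitching in the computeKnownBits() snippet: per-operand known bits are inserted into the wide result at offsets that are multiples of ScalarSize. KnownBits::insertBits is the real API; the loop shape and the free function are assumptions for illustration.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/KnownBits.h"

llvm::KnownBits concatKnownBits(llvm::ArrayRef<llvm::KnownBits> Ops,
                                unsigned ScalarSize) {
  llvm::KnownBits Known(ScalarSize * Ops.size()); // ScalarSize * NumOperands == BitWidth
  for (unsigned I = 0, E = Ops.size(); I != E; ++I)
    Known.insertBits(Ops[I], ScalarSize * I);
  return Known;
}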
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/ |
H A D | AArch64ISelLowering.cpp | 28411 unsigned ScalarSize = Op.getScalarValueSizeInBits(); in SimplifyDemandedBitsForTargetNode() local
28412 assert(ScalarSize > ShiftLBits && "Invalid shift imm"); in SimplifyDemandedBitsForTargetNode()
28414 APInt ZeroBits = APInt::getLowBitsSet(ScalarSize, ShiftLBits); in SimplifyDemandedBitsForTargetNode()
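The AArch64 lines encode a standard fact: a left shift by ShiftLBits forces the low ShiftLBits bits to zero, so those demanded bits can be cleared. APInt::getLowBitsSet is the real helper; the wrapper function is illustrative.

#include "llvm/ADT/APInt.h"
#include <cassert>

llvm::APInt lowBitsZeroedByShl(unsigned ScalarSize, unsigned ShiftLBits) {
  assert(ScalarSize > ShiftLBits && "Invalid shift imm");
  return llvm::APInt::getLowBitsSet(ScalarSize, ShiftLBits);
}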
|
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/ |
H A D | RISCVISelLowering.cpp | 17461 unsigned ScalarSize = Scalar.getValueSizeInBits(); in PerformDAGCombine() local
17464 unsigned ScalarSize = Scalar.getValueSizeInBits(); in PerformDAGCombine()
17466 if (ScalarSize > EltWidth && Passthru.isUndef()) in PerformDAGCombine()
|