/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/GISel/
PPCLegalizerInfo.cpp
      33  const int EltSize = QueryTy.getElementType().getSizeInBits();   (in isRegisterType(), local)
      34  return (EltSize == 8 || EltSize == 16 || EltSize == 32 || EltSize == 64);   (in isRegisterType())
/freebsd/contrib/llvm-project/llvm/lib/Target/DirectX/ |
CBufferDataLayout.cpp
      81  TypeSize EltSize = getTypeAllocSize(AT->getElementType());   (in getTypeAllocSize(), local)
      82  TypeSize AlignedEltSize = alignTo4Dwords(EltSize);   (in getTypeAllocSize())
      84  return TypeSize::getFixed(AlignedEltSize * (NumElts - 1) + EltSize);   (in getTypeAllocSize())
     102  TypeSize EltSize = getTypeAllocSize(EltTy);   (in getStructLayout(), local)
     107  Offset = Offset.getWithIncrement(EltSize);   (in getStructLayout())
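A minimal standalone sketch of the array-size rule visible on lines 81-84 (helper name alignTo4Dwords mirrors the excerpt; the rest is simplified, not the LLVM API): in a cbuffer, every array element but the last is padded out to a 16-byte (4-dword) slot, while the last element contributes only its unpadded size.

    #include <cstdint>
    #include <iostream>

    // Rounds a byte size up to a 16-byte (4-dword) boundary.
    static uint64_t alignTo4Dwords(uint64_t Size) {
      return (Size + 15) & ~uint64_t(15);
    }

    // Total size of an N-element cbuffer array: N-1 padded slots
    // plus the final element at its natural size.
    static uint64_t cbufferArraySize(uint64_t EltSize, uint64_t NumElts) {
      uint64_t AlignedEltSize = alignTo4Dwords(EltSize);
      return AlignedEltSize * (NumElts - 1) + EltSize;
    }

    int main() {
      // float arr[4]: three padded 16-byte slots plus one trailing 4-byte element.
      std::cout << cbufferArraySize(4, 4) << "\n"; // prints 52
    }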
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/ |
AArch64PerfectShuffle.h
    6702  inline bool isREVMask(ArrayRef<int> M, unsigned EltSize, unsigned NumElts,   (in isREVMask(), argument)
    6711  BlockElts = BlockSize / EltSize;   (in isREVMask())
    6713  if (BlockSize <= EltSize || BlockSize != BlockElts * EltSize)   (in isREVMask())
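A standalone sketch of the shape check on lines 6711-6713 (simplified; the real isREVMask() also validates the mask indices themselves): a REV instruction reverses EltSize-bit elements inside each BlockSize-bit block, so the block must hold at least two whole elements and be an exact multiple of the element size.

    // Returns true when a BlockSize-bit reversal over EltSize-bit lanes
    // is even plausible (EltSize is assumed non-zero, as in the caller).
    static bool isPlausibleREVShape(unsigned BlockSize, unsigned EltSize) {
      unsigned BlockElts = BlockSize / EltSize; // elements per reversed block
      if (BlockSize <= EltSize || BlockSize != BlockElts * EltSize)
        return false; // block too small, or element size does not divide it
      return true;
    }
    // e.g. isPlausibleREVShape(64, 16) is true (REV64 over 16-bit lanes);
    //      isPlausibleREVShape(32, 32) is false.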
AArch64TargetTransformInfo.h
     326  unsigned EltSize = DataTypeTy->getElementType()->getScalarSizeInBits();   (in isLegalNTStoreLoad(), local)
     327  return NumElements > 1 && isPowerOf2_64(NumElements) && EltSize >= 8 &&   (in isLegalNTStoreLoad())
     328  EltSize <= 128 && isPowerOf2_64(EltSize);   (in isLegalNTStoreLoad())
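A self-contained sketch of the legality predicate on lines 326-328, with isPowerOf2_64 written out (LLVM's version lives in MathExtras.h; the wrapper name here is hypothetical):

    #include <cstdint>

    static bool isPowerOf2_64(uint64_t V) { return V && !(V & (V - 1)); }

    // Non-temporal vector load/store legality: more than one lane, lane
    // count and lane width both powers of two, each lane 8..128 bits.
    static bool isLegalNTVector(uint64_t NumElements, uint64_t EltSize) {
      return NumElements > 1 && isPowerOf2_64(NumElements) && EltSize >= 8 &&
             EltSize <= 128 && isPowerOf2_64(EltSize);
    }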
AArch64RegisterInfo.td
    1720  class MatrixTileAsmOperand<string RC, int EltSize> : AsmOperandClass {
    1721  let Name = "MatrixTile" # EltSize;
    1727  # EltSize # ", AArch64::" # RC # "RegClassID>";
    1730  class MatrixTileOperand<int EltSize, int NumBitsForTile, RegisterClass RC>
    1732  let ParserMatchClass = MatrixTileAsmOperand<!cast<string>(RC), EltSize>;
    1745  class MatrixTileVectorAsmOperand<string RC, int EltSize, int IsVertical>
    1747  let Name = "MatrixTileVector" # !if(IsVertical, "V", "H") # EltSize;
    1754  # EltSize # ", AArch64::" # RC # "RegClassID>";
    1757  class MatrixTileVectorOperand<int EltSize, int NumBitsForTile,
    1760  let ParserMatchClass = MatrixTileVectorAsmOperand<!cast<string>(RC), EltSize,
    [all …]
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/MCTargetDesc/ |
X86ShuffleDecode.cpp
     399  void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,   (in DecodeEXTRQIMask(), argument)
     409  if (0 != (Len % EltSize) || 0 != (Idx % EltSize))   (in DecodeEXTRQIMask())
     423  Len /= EltSize;   (in DecodeEXTRQIMask())
     424  Idx /= EltSize;   (in DecodeEXTRQIMask())
     436  void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,   (in DecodeINSERTQIMask(), argument)
     446  if (0 != (Len % EltSize) || 0 != (Idx % EltSize))   (in DecodeINSERTQIMask())
     460  Len /= EltSize;   (in DecodeINSERTQIMask())
     461  Idx /= EltSize;   (in DecodeINSERTQIMask())
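A sketch of the unit conversion both decoders perform (function name bitsToElements is hypothetical): EXTRQ/INSERTQ immediates give a bit length and a bit index, and shuffle decoding only proceeds when both are whole multiples of the element width, in which case they convert to lane units.

    #include <optional>
    #include <utility>

    // Convert a bit-granular (Len, Idx) pair to element units, or report
    // failure when either is not element-aligned (the mask stays undecoded).
    static std::optional<std::pair<int, int>>
    bitsToElements(int Len, int Idx, int EltSize) {
      if (0 != (Len % EltSize) || 0 != (Idx % EltSize))
        return std::nullopt;
      return std::make_pair(Len / EltSize, Idx / EltSize);
    }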
X86ShuffleDecode.h
     140  void DecodeEXTRQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
     144  void DecodeINSERTQIMask(unsigned NumElts, unsigned EltSize, int Len, int Idx,
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/ |
SILoadStoreOptimizer.cpp
     110  unsigned EltSize;   (member)
     236  unsigned read2Opcode(unsigned EltSize) const;
     237  unsigned read2ST64Opcode(unsigned EltSize) const;
     242  unsigned write2Opcode(unsigned EltSize) const;
     243  unsigned write2ST64Opcode(unsigned EltSize) const;
     778  EltSize =   (in setMI())
     783  EltSize =   (in setMI())
     790  EltSize = AMDGPU::convertSMRDOffsetUnits(*LSO.STM, 4);   (in setMI())
     793  EltSize = 4;   (in setMI())
    1008  if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0))   (in offsetsCanBeCombined())
    [all …]
R600TargetTransformInfo.cpp
     117  unsigned EltSize =   (in getVectorInstrCost(), local)
     119  if (EltSize < 32) {   (in getVectorInstrCost())
SIRegisterInfo.cpp
      99  unsigned EltSize = 4;   (member)
     126  SplitParts = TRI.getRegSplitParts(RC, EltSize);   (in SGPRSpillBuilder())
    1294  unsigned EltSize) {   (in getFlatScratchSpillOpcode(), argument)
    1300  switch (EltSize) {   (in getFlatScratchSpillOpcode())
    1356  unsigned EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;   (in buildSpillLoadStore(), local)
    1357  unsigned NumSubRegs = RegWidth / EltSize;   (in buildSpillLoadStore())
    1358  unsigned Size = NumSubRegs * EltSize;   (in buildSpillLoadStore())
    1364  int64_t MaxOffset = Offset + Size + RemSize - EltSize;   (in buildSpillLoadStore())
    1367  if (IsFlat && EltSize > 4) {   (in buildSpillLoadStore())
    1368  LoadStoreOp = getFlatScratchSpillOpcode(TII, LoadStoreOp, EltSize);   (in buildSpillLoadStore())
    [all …]
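A sketch of the spill-splitting arithmetic on lines 1356-1357 (names and the assumption that RegWidth is in bytes are taken from the excerpt; the SpillPlan type is invented for illustration): flat scratch accesses can move up to 16 bytes at a time, everything else is split into 4-byte pieces.

    #include <algorithm>

    struct SpillPlan {
      unsigned EltSize;    // bytes moved per memory operation
      unsigned NumSubRegs; // how many operations the register is split into
    };

    static SpillPlan planSpill(unsigned RegWidth, bool IsFlat, bool IsAGPR) {
      SpillPlan P;
      P.EltSize = (IsFlat && !IsAGPR) ? std::min(RegWidth, 16u) : 4u;
      P.NumSubRegs = RegWidth / P.EltSize;
      return P;
    }
    // e.g. a 32-byte register tuple with flat scratch: EltSize 16, two
    // operations; without it: EltSize 4, eight operations.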
AMDGPULegalizerInfo.cpp
      79  const unsigned EltSize = EltTy.getSizeInBits();   (in isSmallOddVector(), local)
      81  EltSize > 1 && EltSize < 32 &&   (in isSmallOddVector())
     130  const int EltSize = EltTy.getSizeInBits();   (in moreEltsToNext32Bit(), local)
     133  assert(EltSize < 32);   (in moreEltsToNext32Bit())
     135  const int NewNumElts = (32 * NextMul32 + EltSize - 1) / EltSize;   (in moreEltsToNext32Bit())
     145  const unsigned EltSize = Ty.getElementType().getSizeInBits();   (in moreElementsToNextExistingRegClass(), local)
     146  const unsigned MaxNumElts = MaxRegisterSize / EltSize;   (in moreElementsToNextExistingRegClass())
     148  assert(EltSize == 32 || EltSize == 64);   (in moreElementsToNextExistingRegClass())
     154  if (SIRegisterInfo::getSGPRClassForBitWidth(NewNumElts * EltSize))   (in moreElementsToNextExistingRegClass())
     158  return std::pair(TypeIdx, LLT::fixed_vector(NewNumElts, EltSize));   (in moreElementsToNextExistingRegClass())
    [all …]
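A sketch of the widening arithmetic on line 135: pad a vector of sub-32-bit elements with extra lanes until its total size reaches the next multiple of 32 bits. The excerpt does not show how NextMul32 is computed, so the ceiling below is an assumption; the + EltSize - 1 makes the final division round up.

    // Number of lanes after padding a NumElts x EltSize-bit vector out to
    // the next 32-bit multiple (EltSize < 32, per the assert above).
    static int widenToNext32Bit(int NumElts, int EltSize) {
      const int NextMul32 = (NumElts * EltSize + 31) / 32; // assumed ceil
      return (32 * NextMul32 + EltSize - 1) / EltSize;
    }
    // e.g. a <5 x s8> is 40 bits -> NextMul32 = 2 -> 64 bits -> 8 lanes.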
/freebsd/contrib/llvm-project/clang/lib/CodeGen/ |
CGBuilder.h
     244  CharUnits EltSize =   (variable)
     251  Addr.getAlignment().alignmentAtOffset(Index * EltSize),
     264  CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy));   (variable)
     268  ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize),
     281  CharUnits EltSize = CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy));   (variable)
     285  Addr.getAlignment().alignmentAtOffset(Index * EltSize));
     294  CharUnits EltSize =   (variable)
     300  Addr.getAlignment().alignmentOfArrayElement(EltSize));
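A sketch of what alignmentAtOffset(Index * EltSize) computes in the lines above (plain integers instead of CharUnits): the guaranteed alignment of a pointer Offset bytes past a pointer with a given alignment is their gcd. Clang's CharUnits implements the same idea via the lowest set bit, which coincides with the gcd for power-of-two alignments.

    #include <cstdint>
    #include <numeric>

    static uint64_t alignmentAtOffset(uint64_t Align, uint64_t Offset) {
      if (Offset == 0)
        return Align; // unmoved pointer keeps its full alignment
      return std::gcd(Align, Offset);
    }
    // e.g. a 16-byte-aligned array of 4-byte elements: element 3 sits at
    // byte offset 12, and alignmentAtOffset(16, 12) == 4.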
CGNonTrivialStruct.cpp
     196  CharUnits EltSize = Ctx.getTypeSizeInChars(EltTy);   (in visitArray(), local)
     198  llvm::to_string(EltSize.getQuantity()) + "n" +   (in visitArray())
     395  CharUnits EltSize = Ctx.getTypeSizeInChars(EltQT);   (in visitArray(), local)
     401  StartAddrs[I].getAlignment().alignmentAtOffset(EltSize));   (in visitArray())
     412  NewAddrs[I] = getAddrWithOffset(NewAddrs[I], EltSize);   (in visitArray())
/freebsd/contrib/llvm-project/clang/lib/CodeGen/Targets/ |
PPC.cpp
      18  CharUnits EltSize, const ComplexType *CTy) {   (in complexTempStructure(), argument)
      27  CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);   (in complexTempStructure())
      29  2 * SlotSize - EltSize);   (in complexTempStructure())
     255  CharUnits EltSize = TypeInfo.Width / 2;   (in EmitVAArg(), local)
     256  if (EltSize < SlotSize)   (in EmitVAArg())
     257  return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);   (in EmitVAArg())
     972  CharUnits EltSize = TypeInfo.Width / 2;   (in EmitVAArg(), local)
     973  if (EltSize < SlotSize)   (in EmitVAArg())
     974  return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);   (in EmitVAArg())
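A sketch of the offset arithmetic in complexTempStructure() on lines 27-29 (function name below is hypothetical): a complex value whose parts are narrower than the va_arg slot occupies two slots with each part right-justified, as on big-endian PowerPC, so each part sits at the end of its slot.

    #include <cstdint>
    #include <utility>

    // Byte offsets of the real and imaginary parts within two va_arg slots.
    static std::pair<uint64_t, uint64_t>
    complexPartOffsets(uint64_t SlotSize, uint64_t EltSize) {
      uint64_t RealOff = SlotSize - EltSize;     // end of the first slot
      uint64_t ImagOff = 2 * SlotSize - EltSize; // end of the second slot
      return {RealOff, ImagOff};
    }
    // e.g. _Complex float with 8-byte slots: real at offset 4, imag at 12.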
X86.cpp
    2010  uint64_t EltSize = getContext().getTypeSize(AT->getElementType());   (in classify(), local)
    2018  (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))   (in classify())
    2021  for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {   (in classify())
    2315  unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());   (in BitsContainNoUserData(), local)
    2321  unsigned EltOffset = i*EltSize;   (in BitsContainNoUserData())
    2402  unsigned EltSize = TD.getTypeAllocSize(EltTy);   (in getFPTypeAtOffset(), local)
    2403  IROffset -= IROffset / EltSize * EltSize;   (in getFPTypeAtOffset())
    2515  unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);   (in GetINTEGERTypeAtOffset(), local)
    2516  unsigned EltOffset = IROffset/EltSize*EltSize;   (in GetINTEGERTypeAtOffset())
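A sketch of the integer idiom on lines 2403 and 2516 (helper names are invented): with unsigned division, IROffset / EltSize * EltSize rounds an offset down to the start of the containing array element, so the two lines compute the element's start and the remaining offset within it.

    // Start of the array element containing IROffset.
    static unsigned containingEltStart(unsigned IROffset, unsigned EltSize) {
      return IROffset / EltSize * EltSize; // == IROffset - IROffset % EltSize
    }

    // Offset left over inside that element.
    static unsigned offsetWithinElt(unsigned IROffset, unsigned EltSize) {
      return IROffset - containingEltStart(IROffset, EltSize);
    }
    // e.g. offset 10 in an array of 4-byte elements: element starts at 8,
    // remainder 2.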
LoongArch.cpp
     158  CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);   (in detectFARsEligibleStructHelper(), local)
     163  CurOff += EltSize;   (in detectFARsEligibleStructHelper())
AMDGPU.cpp
      76  unsigned EltSize = getContext().getTypeSize(EltTy);   (in numRegsForType(), local)
      79  if (EltSize == 16)   (in numRegsForType())
      82  unsigned EltNumRegs = (EltSize + 31) / 32;   (in numRegsForType())
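A sketch of the register counting on line 82: each element needs ceil(EltSize / 32) 32-bit registers, written with the usual (EltSize + 31) / 32 round-up idiom. The EltSize == 16 branch on line 79 reflects that two 16-bit elements can pack into one register; that packing computation is not shown in the excerpt.

    // 32-bit registers needed for one EltSize-bit element.
    static unsigned numRegsForElt(unsigned EltSize) {
      return (EltSize + 31) / 32; // round-up division
    }
    // e.g. numRegsForElt(64) == 2, numRegsForElt(32) == 1, numRegsForElt(8) == 1.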
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/ |
ARMTargetTransformInfo.cpp
    1591  unsigned EltSize = VTy->getScalarSizeInBits();   (in getGatherScatterOpCost(), local)
    1612  if (EltSize < 8 || Alignment < EltSize / 8)   (in getGatherScatterOpCost())
    1615  unsigned ExtSize = EltSize;   (in getGatherScatterOpCost())
    1629  if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||   (in getGatherScatterOpCost())
    1630  (TypeSize == 16 && EltSize == 8)) &&   (in getGatherScatterOpCost())
    1643  if (((EltSize == 16 && TypeSize == 32) ||   (in getGatherScatterOpCost())
    1644  (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&   (in getGatherScatterOpCost())
    [all …]
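A sketch of the guard on line 1612 (the units, bits for EltSize and bytes for Alignment, are inferred from the EltSize / 8 comparison in the excerpt): a gather or scatter only has a cheap form when the element is at least one byte and the access is aligned to at least the element's byte size.

    #include <cstdint>

    static bool gatherHasCheapForm(unsigned EltSizeBits, uint64_t AlignBytes) {
      if (EltSizeBits < 8 || AlignBytes < EltSizeBits / 8)
        return false; // costed as a scalarized (expensive) sequence instead
      return true;
    }
    // e.g. a 32-bit-element gather with 4-byte alignment qualifies; the
    // same gather with only 2-byte alignment does not.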
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/ |
AArch64PostLegalizerLowering.cpp
     164  unsigned EltSize = Ty.getScalarSizeInBits();   (in matchREV(), local)
     167  if (EltSize == 64)   (in matchREV())
     174  if (isREVMask(ShuffleMask, EltSize, NumElts, LaneSize)) {   (in matchREV())
     441  auto EltSize = Builder.buildConstant(IdxTy, EltTy.getSizeInBytes());   (in applyNonConstInsert(), local)
     442  Register Mul = Builder.buildMul(IdxTy, And, EltSize).getReg(0);   (in applyNonConstInsert())
     984  unsigned EltSize = MRI.getType(LHS).getScalarSizeInBits();   (in matchLowerVectorFCMP(), local)
     985  if (EltSize == 16 && !ST.hasFullFP16())   (in matchLowerVectorFCMP())
     987  if (EltSize != 16 && EltSize != 32 && EltSize != 64)   (in matchLowerVectorFCMP())
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/ |
LegalizationArtifactCombiner.h
     936  unsigned NumElts, unsigned EltSize,   (in isSequenceFromUnmerge(), argument)
     942  MI.getSourceReg(i), EltSize, EltUnmergeIdx);   (in isSequenceFromUnmerge())
     962  unsigned EltSize = EltTy.getSizeInBits();   (in tryCombineMergeLike(), local)
     966  auto *Unmerge = findUnmergeThatDefinesReg(Elt0, EltSize, Elt0UnmergeIdx);   (in tryCombineMergeLike())
     984  if (!isSequenceFromUnmerge(MI, 0, Unmerge, 0, NumMIElts, EltSize,   (in tryCombineMergeLike())
    1007  EltSize, false))   (in tryCombineMergeLike())
    1011  unsigned DstIdx = (Elt0UnmergeIdx * EltSize) / DstTy.getSizeInBits();   (in tryCombineMergeLike())
    1035  EltSize, EltUnmergeIdx);   (in tryCombineMergeLike())
    1040  if (!isSequenceFromUnmerge(MI, i, UnmergeI, 0, NumElts, EltSize,   (in tryCombineMergeLike())
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/ |
X86ISelDAGToDAG.cpp
    4593  unsigned EltSize = MemIntr->getMemoryVT().getSizeInBits();   (in matchVPTERNLOG(), local)
    4594  assert((EltSize == 32 || EltSize == 64) && "Unexpected broadcast size!");   (in matchVPTERNLOG())
    4596  bool UseD = EltSize == 32;   (in matchVPTERNLOG())
    6351  unsigned EltSize = ValueSVT.getSizeInBits();   (in Select(), local)
    6356  if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)   (in Select())
    6358  else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)   (in Select())
    6360  else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32)   (in Select())
    6362  else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)   (in Select())
    6364  else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)   (in Select())
    6366  else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64)   (in Select())
    [all …]
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/ |
Analysis.cpp
     108  TypeSize EltSize = DL.getTypeAllocSize(EltTy);   (in ComputeValueVTs(), local)
     111  StartingOffset + i * EltSize);   (in ComputeValueVTs())
     161  uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();   (in computeValueLLTs(), local)
     164  StartingOffset + i * EltSize);   (in computeValueLLTs())
/freebsd/contrib/llvm-project/llvm/lib/Analysis/ |
Loads.cpp
     269  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),   (in isDereferenceableAndAlignedInLoop(), local)
     278  return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,   (in isDereferenceableAndAlignedInLoop())
     296  if (EltSize.sgt(Step->getAPInt()))   (in isDereferenceableAndAlignedInLoop())
     342  if (EltSize.urem(Alignment.value()) != 0)   (in isDereferenceableAndAlignedInLoop())
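A sketch of the two guards on lines 296 and 342, with the APInt arithmetic replaced by plain integers (the function name and the bail-out behavior are inferred from the excerpt): the analysis gives up when the element is wider than the loop's step, and when the element size, acting as the per-iteration stride, is not a multiple of the alignment, since offsets of i * EltSize would then not all stay aligned.

    #include <cstdint>

    static bool loopAccessLooksSafe(int64_t EltSize, int64_t Step,
                                    uint64_t Alignment) {
      if (EltSize > Step)
        return false; // element wider than the stride: analysis gives up
      if ((uint64_t)EltSize % Alignment != 0)
        return false; // i * EltSize would not stay Alignment-aligned
      return true;
    }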
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/ |
PPCTargetTransformInfo.cpp
     702  unsigned EltSize = Val->getScalarSizeInBits();   (in getVectorInstrCost(), local)
     704  unsigned MaskCostForOneBitSize = (VecMaskCost && EltSize == 1) ? 1 : 0;   (in getVectorInstrCost())
     719  unsigned EltSize = Val->getScalarSizeInBits();   (in getVectorInstrCost(), local)
     721  if (EltSize == 64 && Index != -1U)   (in getVectorInstrCost())
     723  else if (EltSize == 32) {   (in getVectorInstrCost())
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/MCTargetDesc/ |
AArch64InstPrinter.h
     193  template <int EltSize>
     219  template <int EltSize>