/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/ |
H A D | MipsLegalizerInfo.cpp |
    25   unsigned MemSize; member
    31   static bool isUnalignedMemmoryAccess(uint64_t MemSize, uint64_t AlignInBits) { in isUnalignedMemmoryAccess() argument
    32   assert(isPowerOf2_64(MemSize) && "Expected power of 2 memory size"); in isUnalignedMemmoryAccess()
    34   if (MemSize > AlignInBits) in isUnalignedMemmoryAccess()
    53   if (Val.MemSize != QueryMemSize) in CheckTy0Ty1MemSizeAlign()
    347  unsigned MemSize = (**MI.memoperands_begin()).getSize().getValue(); in legalizeCustom() local
    353  assert(MemSize <= 8 && "MemSize is too large"); in legalizeCustom()
    359  if (isPowerOf2_64(MemSize)) { in legalizeCustom()
    360  P2HalfMemSize = RemMemSize = MemSize / 2; in legalizeCustom()
    362  P2HalfMemSize = 1 << Log2_32(MemSize); in legalizeCustom()
    [all …]
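The excerpt above checks whether an access is unaligned and, in legalizeCustom(), splits the access size into a power-of-2 piece plus a remainder. A minimal standalone sketch of that arithmetic in plain C++20 (not the LLVM code; the remainder for the non-power-of-2 case is not visible in the excerpt and is assumed to be MemSize - P2Half):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // "Unaligned" here means the access size (in bits) exceeds the proven alignment (in bits).
    bool isUnalignedAccess(uint64_t MemSizeBits, uint64_t AlignInBits) {
      assert(std::has_single_bit(MemSizeBits) && "Expected power of 2 memory size");
      return MemSizeBits > AlignInBits;
    }

    // Split a byte size into a power-of-2 piece and the remainder,
    // mirroring the P2HalfMemSize / RemMemSize computation in the excerpt.
    void splitMemSize(unsigned MemSize, unsigned &P2Half, unsigned &Rem) {
      assert(MemSize >= 1 && MemSize <= 8 && "MemSize is too large");
      if (std::has_single_bit(MemSize)) {
        P2Half = Rem = MemSize / 2;       // e.g. 8 -> 4 + 4
      } else {
        P2Half = std::bit_floor(MemSize); // largest power of 2 <= MemSize, e.g. 6 -> 4
        Rem = MemSize - P2Half;           // assumed remainder, e.g. 6 -> 2
      }
    }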
|
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/ |
H A D | CallLowering.cpp |
    249  uint64_t MemSize = DL.getTypeAllocSize(ElementTy); in setArgFlags() local
    251  Flags.setByRefSize(MemSize); in setArgFlags()
    253  Flags.setByValSize(MemSize); in setArgFlags()
    912  uint64_t MemSize = Flags.getByValSize(); in handleAssignments() local
    917  Handler.getStackAddress(MemSize, Offset, DstMPO, Flags); in handleAssignments()
    935  MemSize, VA); in handleAssignments()
    1272 const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize, in copyArgumentMemory() argument
    1277 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize, in copyArgumentMemory()
    1283 MemSize, DstAlign); in copyArgumentMemory()
    1288 auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize); in copyArgumentMemory()
|
H A D | LegalizerHelper.cpp |
    734  uint64_t MemSize = MemType.getSizeInBytes(); in getOutlineAtomicLibcall() local
    746  return getOutlineAtomicHelper(LC, Ordering, MemSize); in getOutlineAtomicLibcall()
    750  return getOutlineAtomicHelper(LC, Ordering, MemSize); in getOutlineAtomicLibcall()
    755  return getOutlineAtomicHelper(LC, Ordering, MemSize); in getOutlineAtomicLibcall()
    759  return getOutlineAtomicHelper(LC, Ordering, MemSize); in getOutlineAtomicLibcall()
    763  return getOutlineAtomicHelper(LC, Ordering, MemSize); in getOutlineAtomicLibcall()
    767  return getOutlineAtomicHelper(LC, Ordering, MemSize); in getOutlineAtomicLibcall()
    1380 unsigned MemSize = MMO.getSizeInBits().getValue(); in narrowScalar() local
    1382 if (MemSize == NarrowSize) { in narrowScalar()
    1384 } else if (MemSize < NarrowSize) { in narrowScalar()
    [all …]
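getOutlineAtomicLibcall() above picks an outline-atomic helper routine from the access size in bytes. A rough standalone illustration of dispatching on a power-of-2 size; the function and the name scheme are hypothetical stand-ins, LLVM itself selects RTLIB::Libcall entries keyed by operation, ordering, and size:

    #include <cstdint>
    #include <string>

    // Hypothetical: build a per-size helper name for an atomic operation,
    // returning an empty string when the size has no outline helper.
    std::string outlineAtomicHelperName(const std::string &Op, uint64_t MemSize) {
      switch (MemSize) {
      case 1: return "__outline_" + Op + "1";
      case 2: return "__outline_" + Op + "2";
      case 4: return "__outline_" + Op + "4";
      case 8: return "__outline_" + Op + "8";
      default: return {}; // unsupported size: caller falls back to a generic libcall
      }
    }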
|
/freebsd/sys/dev/pms/freebsd/driver/common/ |
H A D | lxproto.h | 52 U32 MemSize,
|
H A D | lxutil.c |
    628  U32 MemSize, in agtiapi_MemAlloc() argument
    640  *VirtAlloc = malloc( MemSize + Align, M_PMC_MMAL, M_ZERO | M_NOWAIT ); in agtiapi_MemAlloc()
    651  pmsc->typhIdx += residAlign + MemSize; // update index in agtiapi_MemAlloc()
    659  pmsc->tyPhsIx += residAlign + MemSize; // update index in agtiapi_MemAlloc()
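agtiapi_MemAlloc() over-allocates by the alignment and then advances the allocation index by residAlign + MemSize. A generic user-space sketch of that over-allocate-and-round-up technique (standard calloc in place of the FreeBSD kernel malloc(9) shown above; residAlign names the padding needed to reach the boundary):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // Over-allocate by Align bytes, then round the start up to an Align boundary.
    // The raw allocation is returned through RawOut so it can later be free()d.
    void *allocAligned(size_t MemSize, size_t Align, void **RawOut) {
      assert(Align > 0 && "alignment must be nonzero");
      void *Raw = std::calloc(1, MemSize + Align); // zero-filled, like M_ZERO
      if (!Raw)
        return nullptr;
      uintptr_t Addr = reinterpret_cast<uintptr_t>(Raw);
      uintptr_t ResidAlign = (Align - Addr % Align) % Align; // padding to the boundary
      *RawOut = Raw;
      return reinterpret_cast<void *>(Addr + ResidAlign);
    }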
|
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/ |
H A D | RuntimeLibcallUtil.h | 73 uint64_t MemSize);
|
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/ |
H A D | TargetLoweringBase.cpp |
    403  uint64_t MemSize) { in getOutlineAtomicHelper() argument
    405  switch (MemSize) { in getOutlineAtomicHelper()
    450  uint64_t MemSize = VT.getScalarSizeInBits() / 8; in getOUTLINE_ATOMIC() local
    459  return getOutlineAtomicHelper(LC, Order, MemSize); in getOUTLINE_ATOMIC()
    463  return getOutlineAtomicHelper(LC, Order, MemSize); in getOUTLINE_ATOMIC()
    467  return getOutlineAtomicHelper(LC, Order, MemSize); in getOUTLINE_ATOMIC()
    471  return getOutlineAtomicHelper(LC, Order, MemSize); in getOUTLINE_ATOMIC()
    475  return getOutlineAtomicHelper(LC, Order, MemSize); in getOUTLINE_ATOMIC()
    479  return getOutlineAtomicHelper(LC, Order, MemSize); in getOUTLINE_ATOMIC()
|
H A D | TargetInstrInfo.cpp |
    650  int64_t MemSize = 0; in foldMemoryOperand() local
    655  MemSize = MFI.getObjectSize(FI); in foldMemoryOperand()
    666  MemSize = std::max(MemSize, OpSize); in foldMemoryOperand()
    670  assert(MemSize && "Did not expect a zero-sized stack slot"); in foldMemoryOperand()
    700  Flags, MemSize, MFI.getObjectAlign(FI)); in foldMemoryOperand()
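foldMemoryOperand() derives the folded access size either from the frame object or from the largest operand being folded. A plain sketch of that max-of-operand-sizes computation, as an assumed simplification of the excerpt:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Memory size for a folded stack access: the frame object's size if known,
    // otherwise the largest of the folded operands' sizes.
    int64_t foldedMemSize(int64_t FrameObjectSize, const std::vector<int64_t> &OpSizes) {
      int64_t MemSize = 0;
      if (FrameObjectSize > 0) {
        MemSize = FrameObjectSize;
      } else {
        for (int64_t OpSize : OpSizes)
          MemSize = std::max(MemSize, OpSize);
      }
      assert(MemSize && "Did not expect a zero-sized stack slot");
      return MemSize;
    }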
|
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/ |
H A D | CallLowering.h |
    265  virtual Register getStackAddress(uint64_t MemSize, int64_t Offset,
    321  uint64_t MemSize, CCValAssign &VA) const;
|
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/GISel/ |
H A D | RISCVCallLowering.cpp |
    70   Register getStackAddress(uint64_t MemSize, int64_t Offset, in getStackAddress()
    219  Register getStackAddress(uint64_t MemSize, int64_t Offset, in getStackAddress()
    224  int FI = MFI.CreateFixedObject(MemSize, Offset, /*Immutable=*/true); in getStackAddress()
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/ |
H A D | AMDGPULegalizerInfo.cpp |
    411  uint64_t MemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits(); in isLoadStoreSizeLegal() local
    420  if (Ty.isVector() && MemSize != RegSize) in isLoadStoreSizeLegal()
    427  if (IsLoad && MemSize < Size) in isLoadStoreSizeLegal()
    428  MemSize = std::max(MemSize, Align); in isLoadStoreSizeLegal()
    432  if (MemSize != RegSize && RegSize != 32) in isLoadStoreSizeLegal()
    435  if (MemSize > maxSizeForAddrSpace(ST, AS, IsLoad, in isLoadStoreSizeLegal()
    440  switch (MemSize) { in isLoadStoreSizeLegal()
    459  assert(RegSize >= MemSize); in isLoadStoreSizeLegal()
    461  if (AlignBits < MemSize) { in isLoadStoreSizeLegal()
    463  if (!TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS, in isLoadStoreSizeLegal()
    [all …]
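isLoadStoreSizeLegal() compares the in-memory size against the register size and the proven alignment. A condensed standalone sketch of those checks; the predicate name and parameter list are invented here, and the real function has more cases (address-space limits, the per-size switch) than the excerpt shows:

    #include <algorithm>
    #include <cstdint>

    // Simplified legality check: vector accesses must match the register size
    // exactly, an extending load is treated as wide as its alignment, and a
    // mismatched size is only tolerated for 32-bit registers.
    bool isLoadStoreSizeLegalSketch(uint64_t MemSize, uint64_t RegSize,
                                    uint64_t AlignBits, bool IsVector, bool IsLoad) {
      if (IsVector && MemSize != RegSize)
        return false;                           // vector loads/stores must be exact
      if (IsLoad && MemSize < RegSize)
        MemSize = std::max(MemSize, AlignBits); // pad an extending load to its alignment
      if (MemSize != RegSize && RegSize != 32)
        return false;
      return AlignBits >= MemSize;              // otherwise defer to a misalignment hook
    }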
|
H A D | AMDGPURegisterBankInfo.cpp |
    452  const unsigned MemSize = 8 * MMO->getSize().getValue(); in isScalarLoadLegal() local
    457  ((MemSize == 16 && MMO->getAlign() >= Align(2)) || in isScalarLoadLegal()
    458  (MemSize == 8 && MMO->getAlign() >= Align(1))))) && in isScalarLoadLegal()
    1073 const unsigned MemSize = 8 * MMO->getSize().getValue(); in applyMappingLoad() local
    1079 (MemSize == 32 || LoadTy.isVector() || !isScalarLoadLegal(MI))) in applyMappingLoad()
    1083 ((MemSize == 8 && MMO->getAlign() >= Align(1)) || in applyMappingLoad()
    1084 (MemSize == 16 && MMO->getAlign() >= Align(2))) && in applyMappingLoad()
    1100 B.buildSExtInReg(MI.getOperand(0), WideLoad, MemSize); in applyMappingLoad()
    1104 B.buildZExtInReg(MI.getOperand(0), WideLoad, MemSize); in applyMappingLoad()
    1371 const unsigned MemSize = (Ty.getSizeInBits() + 7) / 8; in applyMappingSBufferLoad() local
    [all …]
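In applyMappingLoad() a sub-32-bit load is widened and the result is then sign- or zero-extended in register by the original MemSize (lines 1100 and 1104). A standalone sketch of what those in-register extensions compute on a 64-bit value; the function names here are illustrative, not LLVM's MIRBuilder API:

    #include <cassert>
    #include <cstdint>

    // Keep only the low MemSizeBits of a wide value (zero-extend in register).
    uint64_t zextInReg(uint64_t Wide, unsigned MemSizeBits) {
      assert(MemSizeBits >= 1 && MemSizeBits <= 64);
      uint64_t Mask = (MemSizeBits == 64) ? ~0ULL : ((1ULL << MemSizeBits) - 1);
      return Wide & Mask;
    }

    // Sign-extend the low MemSizeBits of a wide value (sign-extend in register).
    int64_t sextInReg(uint64_t Wide, unsigned MemSizeBits) {
      assert(MemSizeBits >= 1 && MemSizeBits <= 64);
      unsigned Shift = 64 - MemSizeBits;
      return static_cast<int64_t>(Wide << Shift) >> Shift; // arithmetic shift restores the sign
    }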
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/ |
H A D | AArch64LoadStoreOptimizer.cpp |
    995  int MemSize = TII->getMemScale(*Paired); in mergePairedInsns() local
    1001 PairedOffset /= MemSize; in mergePairedInsns()
    1003 PairedOffset *= MemSize; in mergePairedInsns()
    1747 int MemSize = TII->getMemScale(MI); in findMatchingInsn() local
    1751 if (MIOffset % MemSize) { in findMatchingInsn()
    1757 MIOffset /= MemSize; in findMatchingInsn()
    1759 MIOffset *= MemSize; in findMatchingInsn()
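The load/store optimizer converts between byte offsets and scaled immediates by dividing and multiplying by the access size, rejecting offsets that are not a multiple of it (line 1751). A tiny sketch of that round trip with made-up helper names:

    #include <optional>

    // Convert a byte offset to a scaled immediate for an access of MemSize bytes;
    // fails when the offset is not a multiple of the access size.
    std::optional<int> scaleOffset(int ByteOffset, int MemSize) {
      if (ByteOffset % MemSize)
        return std::nullopt; // cannot encode: not a multiple of the scale
      return ByteOffset / MemSize;
    }

    // Convert a scaled immediate back to a byte offset.
    int unscaleOffset(int ScaledOffset, int MemSize) {
      return ScaledOffset * MemSize;
    }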
|
/freebsd/contrib/llvm-project/llvm/lib/ObjCopy/ELF/ |
H A D | ELFObject.cpp |
    49   Phdr.p_memsz = Seg.MemSize; in writePhdr()
    1240 Seg.VAddr + Seg.MemSize >= Sec.Addr + SecSize; in sectionWithinSegment()
    1476 Seg.MemSize = Phdr.p_memsz; in readProgramHeaders()
    1501 PrHdr.FileSize = PrHdr.MemSize = Ehdr.e_phentsize * Ehdr.e_phnum; in readProgramHeaders()
    2486 ElfHdr.FileSize = ElfHdr.MemSize = sizeof(Elf_Ehdr); in initEhdrSegment()
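The line 1240 excerpt is the upper-bound half of an interval-containment test: a section lies within a segment when its address range fits inside [VAddr, VAddr + MemSize]. A standalone version of that check, assuming the lower-bound half that the excerpt truncates:

    #include <cstdint>

    // A section lies within a segment when its [Addr, Addr+Size) range is
    // contained in the segment's [VAddr, VAddr+MemSize) range.
    bool sectionWithinSegmentSketch(uint64_t SegVAddr, uint64_t SegMemSize,
                                    uint64_t SecAddr, uint64_t SecSize) {
      return SegVAddr <= SecAddr &&
             SegVAddr + SegMemSize >= SecAddr + SecSize;
    }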
|
H A D | ELFObject.h | 578 uint64_t MemSize = 0; variable
|
/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/ |
H A D | HexagonFrameLowering.cpp |
    2464 unsigned MemSize = HII.getMemAccessSize(MI); in optimizeSpillSlots() local
    2468 CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth; in optimizeSpillSlots()
    2470 CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth; in optimizeSpillSlots()
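optimizeSpillSlots() replaces a reload with a register-to-register extend whose width matches the original memory access. A small sketch of that opcode choice with placeholder enumerators standing in for Hexagon::A2_sxtb/sxth/zxtb/zxth:

    // Placeholder opcodes; the real ones are Hexagon machine instructions.
    enum class CopyOpc { SxtB, SxtH, ZxtB, ZxtH, Copy };

    // Choose the extend that reproduces a MemSize-byte (1 or 2) sign- or
    // zero-extending load; anything wider is assumed to be a plain copy.
    CopyOpc pickSpillCopyOpc(unsigned MemSize, bool IsSigned) {
      if (MemSize > 2)
        return CopyOpc::Copy;
      if (IsSigned)
        return MemSize == 1 ? CopyOpc::SxtB : CopyOpc::SxtH;
      return MemSize == 1 ? CopyOpc::ZxtB : CopyOpc::ZxtH;
    }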
|
H A D | HexagonISelLoweringHVX.cpp |
    2984 uint64_t MemSize = (MemOpc == ISD::MLOAD || MemOpc == ISD::MSTORE) in SplitHvxMemOp() local
    2987 MOp0 = MF.getMachineMemOperand(MMO, 0, MemSize); in SplitHvxMemOp()
    2988 MOp1 = MF.getMachineMemOperand(MMO, HwLen, MemSize); in SplitHvxMemOp()
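SplitHvxMemOp() carves one wide memory operand into two halves at byte offsets 0 and HwLen (the HVX vector length), each covering MemSize bytes. A generic sketch of splitting an (offset, size) descriptor that way; MemDesc and splitWideAccess are illustrative names, not LLVM types:

    #include <cstdint>
    #include <utility>

    struct MemDesc {
      uint64_t Offset; // byte offset from the original base
      uint64_t Size;   // bytes covered by this half
    };

    // Split a vector-sized access into two halves: one at offset 0 and one at
    // offset HwLen, each HalfSize bytes. (For masked loads/stores LLVM derives
    // the half size from the memory VT rather than HwLen; omitted here.)
    std::pair<MemDesc, MemDesc> splitWideAccess(uint64_t HwLen, uint64_t HalfSize) {
      return {MemDesc{0, HalfSize}, MemDesc{HwLen, HalfSize}};
    }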
|
/freebsd/contrib/llvm-project/llvm/include/llvm/ObjectYAML/ |
H A D | ELFYAML.h | 713 std::optional<llvm::yaml::Hex64> MemSize; member
|
/freebsd/contrib/llvm-project/llvm/lib/ObjectYAML/ |
H A D | ELFEmitter.cpp | 1213 PHeader.p_memsz = YamlPhdr.MemSize ? uint64_t(*YamlPhdr.MemSize) in setProgramHeaderLayout()
|
H A D | ELFYAML.cpp | 1155 IO.mapOptional("MemSize", Phdr.MemSize); in mapping()
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/ |
H A D | AArch64InstructionSelector.cpp |
    5503 unsigned MemSize = Ld.getMMO().getMemoryType().getSizeInBytes(); in selectIndexedLoad() local
    5505 if (MemSize < MRI.getType(Dst).getSizeInBytes()) in selectIndexedLoad()
    5517 Opc = FPROpcodes[Log2_32(MemSize)]; in selectIndexedLoad()
    5519 Opc = GPROpcodes[Log2_32(MemSize)]; in selectIndexedLoad()
    5528 Opc = FPROpcodes[Log2_32(MemSize)]; in selectIndexedLoad()
    5530 Opc = GPROpcodes[Log2_32(MemSize)]; in selectIndexedLoad()
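selectIndexedLoad() rejects loads narrower than the destination and then indexes a per-size opcode table by the log2 of the byte size. A sketch of that lookup with a placeholder opcode table (the real tables hold AArch64 pre/post-indexed load opcodes and there is one table per register bank):

    #include <array>
    #include <bit>
    #include <optional>

    // Placeholder opcodes for 1/2/4/8/16-byte loads.
    constexpr std::array<unsigned, 5> GPROpcodes = {101, 102, 104, 108, 116};

    std::optional<unsigned> pickIndexedLoadOpc(unsigned MemSize, unsigned DstSizeBytes) {
      if (MemSize < DstSizeBytes)
        return std::nullopt;                        // extending indexed loads not handled here
      unsigned Idx = std::bit_width(MemSize) - 1;   // == Log2_32 for a power-of-2 size
      if (Idx >= GPROpcodes.size())
        return std::nullopt;
      return GPROpcodes[Idx];
    }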
|
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/ |
H A D | X86FastISel.cpp |
    3620 unsigned MemSize = ResVT.getSizeInBits()/8; in fastLowerCall() local
    3621 int FI = MFI.CreateStackObject(MemSize, Align(MemSize), false); in fastLowerCall()
|
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/ |
H A D | ARMISelDAGToDAG.cpp |
    1077 unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8; in SelectAddrMode6() local
    1078 if (MMOAlign.value() >= MemSize && MemSize > 1) in SelectAddrMode6()
    1079 Alignment = MemSize; in SelectAddrMode6()
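SelectAddrMode6() only encodes an alignment hint when the proven alignment covers the whole access and the access is wider than one byte. That clamp is easy to state on its own (function name is illustrative):

    // Alignment operand for an addrmode6-style access: use the access size as
    // the hint when the pointer is known to be at least that aligned,
    // otherwise encode no hint (0).
    unsigned addrMode6Alignment(unsigned MemSize, unsigned KnownAlign) {
      if (KnownAlign >= MemSize && MemSize > 1)
        return MemSize;
      return 0;
    }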
|
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/AsmParser/ |
H A D | X86AsmParser.cpp |
    4251 const char *MemSize = Base[0] != 'f' ? "\x08\x10\x20\x40" : "\x20\x40\x50\0"; in matchAndEmitATTInstruction() local
    4281 MemOp->Mem.Size = MemSize[I]; in matchAndEmitATTInstruction()
|
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ |
H A D | SelectionDAGBuilder.cpp |
    11558 uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy); in LowerArguments() local
    11570 Flags.setByRefSize(MemSize); in LowerArguments()
    11572 Flags.setByValSize(MemSize); in LowerArguments()
|