/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/GISel/
RISCVLegalizerInfo.cpp:
    40   (ST.hasStdExtD() && Query.Types[TypeIdx].getSizeInBits() == 64));  in typeIsScalarFPArith()
    50   (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||  in typeIsLegalIntOrFPVec()
    53   ST.getELen() == 64);  in typeIsLegalIntOrFPVec()
    65   ST.getELen() == 64);  in typeIsLegalBoolVec()
    78   const LLT s64 = LLT::scalar(64);  in RISCVLegalizerInfo()
    86   const LLT nxv64s1 = LLT::scalable_vector(64, s1);  in RISCVLegalizerInfo()
    94   const LLT nxv64s8 = LLT::scalable_vector(64, s8);  in RISCVLegalizerInfo()
    257  if (XLen == 64 || ST.hasStdExtD())  in RISCVLegalizerInfo()
    260  .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)  in RISCVLegalizerInfo()
    272  if (XLen == 64) {  in RISCVLegalizerInfo()
    [all …]
|
RISCVInstructionSelector.cpp:
    61   void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB,
    64   bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB,
    69   bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB,
    71   bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
    72   bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB,
    75   bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
    76   bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB,
    78   bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
    81   MachineIRBuilder &MIB) const;
    82   bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
    [all …]
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/
AArch64InstructionSelector.cpp:
    83   MIB.setMF(MF);  in setupMF()
    130  MachineIRBuilder &MIB) const;
    132  MachineIRBuilder &MIB) const;
    134  MachineIRBuilder &MIB) const;
    137  MachineIRBuilder &MIB) const;
    153  /// Copy lowest part of 128-bit or 64-bit vector to 64-bit or 32-bit
    292  /// - Column 0: The 64-bit opcode variants
    351  MachineIRBuilder &MIB) const;
    356  MachineIRBuilder &MIB) const;
    360  MachineIRBuilder &MIB) const;
    [all …]
|
AArch64PostLegalizerLowering.cpp:
    166  // Element size for a rev cannot be 64.  in matchREV()
    167  if (EltSize == 64)  in matchREV()
    173  for (unsigned LaneSize : {64U, 32U, 16U}) {  in matchREV()
    176  if (LaneSize == 64U)  in matchREV()
    429  LLT FramePtrTy = LLT::pointer(0, 64);  in applyNonConstInsert()
    501  auto SrcCst = Builder.buildConstant(LLT::scalar(64), SrcLane);  in applyINS()
    503  auto DstCst = Builder.buildConstant(LLT::scalar(64), DstLane);  in applyINS()
    540  MachineIRBuilder MIB(MI);  in applyVAshrLshrImm() local
    541  auto ImmDef = MIB.buildConstant(LLT::scalar(32), Imm);  in applyVAshrLshrImm()
    542  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1), ImmDef});  in applyVAshrLshrImm()
    [all …]
|
AArch64LegalizerInfo.cpp:
    46   const LLT p0 = LLT::pointer(0, 64);  in AArch64LegalizerInfo()
    50   const LLT s64 = LLT::scalar(64);  in AArch64LegalizerInfo()
    61   const LLT v2s64 = LLT::fixed_vector(2, 64);  in AArch64LegalizerInfo()
    73   /* Begin 64bit types */  in AArch64LegalizerInfo()
    102  .widenVectorEltsToVectorMinSize(0, 64)  in AArch64LegalizerInfo()
    681  .scalarizeIf(scalarOrEltWiderThan(0, 64), 0)  in AArch64LegalizerInfo()
    682  .scalarizeIf(scalarOrEltWiderThan(1, 64), 1)  in AArch64LegalizerInfo()
    687  return Query.Types[1] == s16 && Query.Types[0].getSizeInBits() > 64;  in AArch64LegalizerInfo()
    696  return Query.Types[0].getScalarSizeInBits() <= 64 &&  in AArch64LegalizerInfo()
    703  return Query.Types[1].getScalarSizeInBits() <= 64 &&  in AArch64LegalizerInfo()
    [all …]
|
AArch64PostLegalizerCombiner.cpp:
    85   if (DstSize != 16 && DstSize != 32 && DstSize != 64)  in matchExtractVecEltPairwiseAdd()
    117  LLT s64 = LLT::scalar(64);  in applyExtractVecEltPairwiseAdd()
    156  // 64-bit is 5 cycles, so this is always a win.  in matchAArch64MulConstCombine()
    226  auto Shift = B.buildConstant(LLT::scalar(64), ShiftAmt);  in matchAArch64MulConstCombine()
    241  B.buildShl(DstReg, Res, B.buildConstant(LLT::scalar(64), TrailingZeroes));  in matchAArch64MulConstCombine()
    305  /// Match a 128b store of zero and split it into two 64 bit stores, for
    332  LLT NewTy = LLT::scalar(64);  in applySplitStoreZero128()
    336  B.buildConstant(LLT::scalar(64), 8));  in applySplitStoreZero128()
    391  if (DstTy != LLT::fixed_vector(2, 64) && DstTy != LLT::fixed_vector(2, 32) &&  in matchCombineMulCMLT()
    513  CSEMIRBuilder &MIB);
    [all …]
|
AArch64CallLowering.cpp:
    153  auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);  in getStackAddress()
    225  MachineInstrBuilder MIB)  in CallReturnHandler()
    226  : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}  in CallReturnHandler()
    229  MIB.addDef(PhysReg, RegState::Implicit);  in markPhysRegUsed()
    232  MachineInstrBuilder MIB;  member
    239  MachineInstrBuilder MIB)  in ReturnedArgCallReturnHandler()
    240  : CallReturnHandler(MIRBuilder, MRI, MIB) {}  in ReturnedArgCallReturnHandler()
    247  MachineInstrBuilder MIB, bool IsTailCall = false,  in OutgoingArgHandler()
    249  : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),  in OutgoingArgHandler()
    257  LLT p0 = LLT::pointer(0, 64);  in getStackAddress()
    [all …]
|
/freebsd/lib/libc/gen/
sysconf.c:
    75   int mib[2], sverrno, value;  in sysconf() local
    83   mib[0] = CTL_KERN;  in sysconf()
    84   mib[1] = KERN_ARGMAX;  in sysconf()
    99   mib[0] = CTL_KERN;  in sysconf()
    100  mib[1] = KERN_NGROUPS;  in sysconf()
    135  mib[0] = CTL_KERN;  in sysconf()
    136  mib[1] = KERN_SAVED_IDS;  in sysconf()
    139  mib[0] = CTL_KERN;  in sysconf()
    140  mib[1] = KERN_POSIX1;  in sysconf()
    194  mib[0] = CTL_P1003_1B;  in sysconf()
    [all …]
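The sysconf() hits above show only the mib[] assignments; the sysctl(3) call that consumes them lies outside the excerpt. As a minimal, self-contained sketch of the same numeric-MIB idiom (not the actual libc code), querying kern.argmax the way sysconf(_SC_ARG_MAX) does looks roughly like this:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
        int mib[2];
        int value;
        size_t len = sizeof(value);

        mib[0] = CTL_KERN;      /* top-level "kern" node */
        mib[1] = KERN_ARGMAX;   /* kern.argmax, the value behind sysconf(_SC_ARG_MAX) */

        if (sysctl(mib, 2, &value, &len, NULL, 0) == -1) {
            perror("sysctl");
            return (1);
        }
        printf("kern.argmax = %d\n", value);
        return (0);
    }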
|
/freebsd/contrib/llvm-project/llvm/lib/Target/ARM/
ARMInstructionSelector.cpp:
    47   bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
    60   bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
    61   bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
    62   bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const;
    145  void renderInvertedImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
    202  else if (Size == 64)  in guessRegClass()
    233  static bool selectMergeValues(MachineInstrBuilder &MIB,  in selectMergeValues() argument
    242  Register VReg0 = MIB.getReg(0);  in selectMergeValues()
    244  assert(MRI.getType(VReg0).getSizeInBits() == 64 &&  in selectMergeValues()
    247  Register VReg1 = MIB.getReg(1);  in selectMergeValues()
    [all …]
|
ARMCallLowering.cpp:
    85   if (VTSize == 64)  in isSupportedType()
    98   MachineRegisterInfo &MRI, MachineInstrBuilder &MIB)  in ARMOutgoingValueHandler()
    99   : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB) {}  in ARMOutgoingValueHandler()
    124  assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");  in assignValueToReg()
    125  assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");  in assignValueToReg()
    129  MIB.addUse(PhysReg, RegState::Implicit);  in assignValueToReg()
    183  MachineInstrBuilder MIB;  member
    300  assert(ValSize <= 64 && "Unsupported value size");  in assignValueToReg()
    301  assert(LocSize <= 64 && "Unsupported location size");  in assignValueToReg()
    434  MachineInstrBuilder MIB)  in CallReturnHandler()
    [all …]
|
/freebsd/crypto/openssl/crypto/sha/asm/
sha512-ia64.pl:
    23   # substitutes for 64-bit rotate).
    28   # too much. I mean it's 64 32-bit rounds vs. 80 virtually identical
    29   # 64-bit ones and 1003*64/80 gives 802. Extra cycles, 2 per round,
    32   # reason lower 32 bits are deposited to upper half of 64-bit register
    108  $rounds=64;
    115  for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
    124  .ident \"IA-64 ISA artwork by Andy Polyakov <appro\@openssl.org>\"
    160  { .mib; add r8=0*$SZ,ctx
    162  { .mib; add r10=2*$SZ,ctx
    180  $code.=<<___ if ($BITS==64);
    [all …]
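The first comment hit alludes to IA-64 lacking a 64-bit rotate instruction, so the code synthesizes one (shrp, the shift-right-pair instruction, serves as the substitute). For orientation only, the operation being synthesized is the ordinary rotate used by SHA-512's Sigma functions; a portable C rendering of it (a sketch of mine, not part of the perl module):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Rotate right by n bits, 0 < n < 64; what the shrp-based sequence emulates. */
    static inline uint64_t
    rotr64(uint64_t x, unsigned int n)
    {
        return ((x >> n) | (x << (64 - n)));
    }

    int
    main(void)
    {
        /* Prints ef0123456789abcd. */
        printf("%016" PRIx64 "\n", rotr64(0x0123456789abcdefULL, 8));
        return (0);
    }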
|
sha1-ia64.pl:
    20   # bits of 64-bit register. Just follow mux2 and shrp instructions...
    29   .ident \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
    37   for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
    131  { .mib; add $e=$e,$K_00_19 // e+=K_00_19
    133  { .mib; andcm tmp1=$d,$b
    162  { .mib; add $e=$e,$Konst // e+=K_XX_XX
    164  { .mib; xor tmp0=$c,$b
    166  { .mib; add $e=$e,$X[$i%16] // e+=Xupdate
    168  { .mib; xor tmp0=tmp0,$d // F_20_39(b,c,d)=b^c^d
    183  { .mib; add $e=$e,$Konst // e+=K_60_79
    [all …]
|
/freebsd/sys/contrib/device-tree/Bindings/pci/
faraday,ftpci100.txt:
    29   be aligned to a 1MB boundary, and may be 1MB, 2MB, 4MB, 8MB, 16MB, 32MB, 64MB,
    50   The plain variant has 128MiB of non-prefetchable memory space, whereas the
    51   "dual" variant has 64MiB. Take this into account when describing the ranges.
    97   ranges = /* 1MiB I/O space 0x50000000-0x500fffff */
    99   /* 128MiB non-prefetchable memory 0x58000000-0x5fffffff */
    104  /* 128MiB at 0x00000000-0x07ffffff */
    106  /* 64MiB at 0x00000000-0x03ffffff */
    108  /* 64MiB at 0x00000000-0x03ffffff */
|
faraday,ftpci100.yaml:
    21   The plain variant has 128MiB of non-prefetchable memory space, whereas the
    22   "dual" variant has 64MiB. Take this into account when describing the ranges.
    84   be aligned to a 1MB boundary, and may be 1MB, 2MB, 4MB, 8MB, 16MB, 32MB, 64MB,
    135  ranges = /* 1MiB I/O space 0x50000000-0x500fffff */
    137  /* 128MiB non-prefetchable memory 0x58000000-0x5fffffff */
    142  /* 128MiB at 0x00000000-0x07ffffff */
    144  /* 64MiB at 0x00000000-0x03ffffff */
    146  /* 64MiB at 0x00000000-0x03ffffff */
|
/freebsd/contrib/bsnmp/snmp_mibII/
BEGEMOT-MIB2-MIB.txt:
    29   -- $Begemot: bsnmp/snmp_mibII/BEGEMOT-MIB2-MIB.txt,v 1.1 2006/02/14 09:04:18 brandt_h Exp $
    31   -- Private MIB for MIB2.
    33   BEGEMOT-MIB2-MIB DEFINITIONS ::= BEGIN
    39   FROM BEGEMOT-IP-MIB;
    56   "The MIB for private mib2 stuff."
    79   "The current polling rate for the HC 64-bit counters."
    87   "The polling rate to be enforced for the HC 64-bit counters.
    93   bit rate in its MIB."
|
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
AMDGPUInstructionSelector.cpp:
    265  APInt Imm(64, MO.getImm());  in getSubOperand64()
    557  MachineInstrBuilder MIB =  in selectG_MERGE_VALUES() local
    561  MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));  in selectG_MERGE_VALUES()
    562  MIB.addImm(SubRegs[I]);  in selectG_MERGE_VALUES()
    702  auto MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)  in selectG_BUILD_VECTOR() local
    705  if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))  in selectG_BUILD_VECTOR()
    708  MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), Dst)  in selectG_BUILD_VECTOR()
    712  if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))  in selectG_BUILD_VECTOR()
    753  auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)  in selectG_BUILD_VECTOR() local
    759  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);  in selectG_BUILD_VECTOR()
    [all …]
|
/freebsd/sys/contrib/openzfs/man/man4/
zfs.4:
    45   .No 1/2^ Ns Sy dbuf_metadata_cache_shift Pq 1/64th
    93   .It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128 MiB Pc Pq uint
    193  .Sy 64 MiB
    223  .It Sy l2arc_write_boost Ns = Ns Sy 33554432 Ns B Po 32 MiB Pc Pq u64
    228  .It Sy l2arc_write_max Ns = Ns Sy 33554432 Ns B Po 32 MiB Pc Pq u64
    247  .It Sy metaslab_aliquot Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
    258  .It Sy metaslab_force_ganging Ns = Ns Sy 16777217 Ns B Po 16 MiB + 1 B Pc Pq u64
    304  .It Sy zfs_history_output_max Ns = Ns Sy 1048576 Ns B Po 1 MiB Pc Pq u64
    308  .Sy DMU_MAX_ACCESS Pq 64 MiB .
    332  .It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16 MiB Pc Pq uint
    [all …]
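These zfs.4 hits list ZFS module parameters together with their byte defaults (32 MiB, 16 MiB, and so on). On FreeBSD such parameters are normally surfaced in the vfs.zfs sysctl tree; below is a small sketch of reading one programmatically, assuming the node name vfs.zfs.l2arc_write_max (that exact name is an assumption here, verify it on the target system):

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t val;
        size_t len = sizeof(val);

        /* Assumed node name; confirm with `sysctl -a | grep l2arc_write_max`. */
        if (sysctlbyname("vfs.zfs.l2arc_write_max", &val, &len, NULL, 0) == -1) {
            perror("sysctlbyname");
            return (1);
        }
        printf("l2arc_write_max = %ju bytes\n", (uintmax_t)val);
        return (0);
    }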
|
/freebsd/usr.bin/netstat/
nhgrp.c:
    60   char gw[64];
    196  int mib[7];  in dump_nhgrp_sysctl() local
    203  mib[0] = CTL_NET;  in dump_nhgrp_sysctl()
    204  mib[1] = PF_ROUTE;  in dump_nhgrp_sysctl()
    205  mib[2] = 0;  in dump_nhgrp_sysctl()
    206  mib[3] = af;  in dump_nhgrp_sysctl()
    207  mib[4] = NET_RT_NHGRP;  in dump_nhgrp_sysctl()
    208  mib[5] = 0;  in dump_nhgrp_sysctl()
    209  mib[6] = fibnum;  in dump_nhgrp_sysctl()
    210  if (sysctl(mib, nitems(mib), NULL, &needed, NULL, 0) < 0)  in dump_nhgrp_sysctl()
    [all …]
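The nhgrp.c hits show the seven-element routing-socket MIB being populated and the size-probing sysctl(3) call, but the buffer allocation and second call are truncated away. Below is a hedged reconstruction of the usual two-step pattern, using NET_RT_DUMP purely for illustration (the original uses NET_RT_NHGRP, and this is not the exact netstat code):

    #include <sys/param.h>
    #include <sys/socket.h>
    #include <sys/sysctl.h>
    #include <net/route.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Probe the required size with a NULL old pointer, then fetch the data. */
    static void *
    fetch_rt_dump(int af, int fibnum, size_t *lenp)
    {
        int mib[7];
        size_t needed;
        void *buf;

        mib[0] = CTL_NET;
        mib[1] = PF_ROUTE;
        mib[2] = 0;             /* protocol, always 0 */
        mib[3] = af;            /* address family, 0 for all */
        mib[4] = NET_RT_DUMP;   /* what to dump */
        mib[5] = 0;             /* flags */
        mib[6] = fibnum;        /* routing table (FIB) number */

        if (sysctl(mib, nitems(mib), NULL, &needed, NULL, 0) < 0)
            return (NULL);
        if ((buf = malloc(needed)) == NULL)
            return (NULL);
        if (sysctl(mib, nitems(mib), buf, &needed, NULL, 0) < 0) {
            free(buf);
            return (NULL);
        }
        *lenp = needed;
        return (buf);
    }

    int
    main(void)
    {
        size_t len;
        void *buf = fetch_rt_dump(0, 0, &len);

        if (buf == NULL) {
            perror("fetch_rt_dump");
            return (1);
        }
        printf("routing dump: %zu bytes\n", len);
        free(buf);
        return (0);
    }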
|
/freebsd/crypto/openssl/crypto/bn/asm/
ia64.S:
    4    .ident "IA-64 ISA artwork by Andy Polyakov <appro@openssl.org>"
    171  // shall automagically spin in n+5 on "wider" IA-64 implementations:-)
    179  .align 64
    180  .skip 32 // makes the loop body aligned at 64-byte boundary
    189  { .mib; sub r10=r35,r0,1
    194  { .mib; ADDP r14=0,r32 // rp
    201  { .mib; ADDP r16=0,r34 // bp
    232  .align 64
    233  .skip 32 // makes the loop body aligned at 64-byte boundary
    242  { .mib; sub r10=r35,r0,1
    [all …]
|
/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/
MipsInstrInfo.cpp:
    124  MachineInstrBuilder MIB = BuildMI(&MBB, DL, MCID);  in BuildCondBr() local
    129  MIB.add(Cond[i]);  in BuildCondBr()
    131  MIB.addMBB(TBB);  in BuildCondBr()
    687  MachineInstrBuilder MIB;  in genInstrWithNewOpc() local
    729  MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));  in genInstrWithNewOpc()
    731  // For MIPSR6 JI*C requires an immediate 0 as an operand, JIALC(64) an  in genInstrWithNewOpc()
    739  MIB->removeOperand(0);  in genInstrWithNewOpc()
    742  MIB.add(I->getOperand(J));  in genInstrWithNewOpc()
    745  MIB.addImm(0);  in genInstrWithNewOpc()
    753  MIB.addSym(MO.getMCSymbol(), MipsII::MO_JALR);  in genInstrWithNewOpc()
    [all …]
|
MicroMipsSizeReduction.cpp:
    215  ReduceADDIUToADDIUR1SP, OpInfo(OT_Operands02), ImmField(2, 0, 64, 2)},
    219  ReduceADDIUToADDIUR1SP, OpInfo(OT_Operands02), ImmField(2, 0, 64, 2)},
    233  ReduceADDIUToADDIUR1SP, OpInfo(OT_Operands02), ImmField(2, 0, 64, 2)},
    235  ReduceADDIUToADDIUR1SP, OpInfo(OT_Operands02), ImmField(2, 0, 64, 2)},
    711  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);  in ReplaceInstruction() local
    714  MIB.add(MI->getOperand(2));  in ReplaceInstruction()
    717  MIB.add(MI->getOperand(0));  in ReplaceInstruction()
    718  MIB.add(MI->getOperand(2));  in ReplaceInstruction()
    723  MIB.add(MI->getOperand(0));  in ReplaceInstruction()
    724  MIB.add(MI->getOperand(1));  in ReplaceInstruction()
    [all …]
|
/freebsd/contrib/llvm-project/llvm/lib/Target/X86/
X86InstrInfo.cpp:
    75    cl::init(64), cl::Hidden);
    217   // Arithmetic with just 32-bit and 64-bit variants and no immediates.  in isDataInvariant()
    587   MemBytes = 64;  in isFrameLoadOpcode()
    683   MemBytes = 64;  in isFrameStoreOpcode()
    1077  // size, others 32/64 bit ops would test higher bits which test16rr don't  in findRedundantFlagInstr()
    1175  // type (32-bit or 64-bit) we may just need to forbid SP.  in classifyLEAReg()
    1187  // another we need to add 64-bit registers to the final MI.  in classifyLEAReg()
    1192  NewSrc = getX86SubSuperRegister(SrcReg, 64);  in classifyLEAReg()
    1196  // Virtual register of the wrong class, we have to create a temporary 64-bit  in classifyLEAReg()
    1257  // But testing has shown this *does* help performance in 64-bit mode (at  in convertToThreeAddressWithLEA()
    [all …]
|
/freebsd/sys/dev/bfe/
if_bfereg.h:
    158  #define BFE_EMAC_INT_MIB 0x00000002 /* MIB Interrupt */
    188  #define BFE_MIB_CTRL 0x00000438 /* EMAC MIB Control */
    192  #define BFE_TX_GOOD_O 0x00000500 /* MIB TX Good Octets */
    193  #define BFE_TX_GOOD_P 0x00000504 /* MIB TX Good Packets */
    194  #define BFE_TX_O 0x00000508 /* MIB TX Octets */
    195  #define BFE_TX_P 0x0000050C /* MIB TX Packets */
    196  #define BFE_TX_BCAST 0x00000510 /* MIB TX Broadcast Packets */
    197  #define BFE_TX_MCAST 0x00000514 /* MIB TX Multicast Packets */
    198  #define BFE_TX_64 0x00000518 /* MIB TX <= 64 byte Packets */
    199  #define BFE_TX_65_127 0x0000051C /* MIB TX 65 to 127 byte Packets */
    [all …]
|
/freebsd/sys/dev/vge/
if_vge.c:
    54    * o 64-bit multicast hash table filter
    55    * o 64 entry CAM filter
    71    * The other issue has to do with the way 64-bit addresses are handled.
    75    * an issue, but if you have a 64-bit system and more than 4GB of
    137   * The SQE error counter of MIB seems to report bogus value.
    556   * Program the multicast filter. We use the 64-entry CAM filter
    557   * for perfect filtering. If there's more than 64 multicast addresses,
    695   * controllers are supposed to support 64bit DMA so enable  in vge_dma_alloc()
    696   * 64bit DMA on these controllers.  in vge_dma_alloc()
    1895  * entire 64KB TCP/IP segment. Maybe VGE_TD_LS_MOF,  in vge_encap()
    [all …]
|
/freebsd/sys/x86/pci/
pci_early_quirks.c:
    45   #define MiB(v) ((unsigned long)(v) << 20)  macro
    104  return (MiB(1));  in intel_stolen_size_gen3()
    106  return (MiB(4));  in intel_stolen_size_gen3()
    108  return (MiB(8));  in intel_stolen_size_gen3()
    110  return (MiB(16));  in intel_stolen_size_gen3()
    112  return (MiB(32));  in intel_stolen_size_gen3()
    114  return (MiB(48));  in intel_stolen_size_gen3()
    116  return (MiB(64));  in intel_stolen_size_gen3()
    118  return (MiB(12  in intel_stolen_size_gen3()
    [all …]
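The pci_early_quirks.c hits show the MiB() byte-shift macro and a series of returns from intel_stolen_size_gen3(). The sketch below restates the macro and a hypothetical lookup in the same spirit; the switch cases and values are illustrative only, not the real graphics-stolen-memory table:

    #include <stdio.h>

    /* Same convenience macro as in pci_early_quirks.c: megabytes to bytes. */
    #define MiB(v)  ((unsigned long)(v) << 20)

    /*
     * Hypothetical lookup in the spirit of intel_stolen_size_gen3(): map a
     * graphics-mode-select field to the stolen-memory size it encodes.
     */
    static unsigned long
    stolen_size_from_gms(unsigned int gms)
    {
        switch (gms) {
        case 1:  return (MiB(1));
        case 2:  return (MiB(4));
        case 3:  return (MiB(8));
        case 4:  return (MiB(16));
        case 5:  return (MiB(32));
        default: return (0);
        }
    }

    int
    main(void)
    {
        printf("%lu bytes\n", stolen_size_from_gms(4)); /* prints 16777216 */
        return (0);
    }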