| /linux/drivers/staging/greybus/ |
| loopback.c |
    53: struct gb_loopback *gb;
    108: #define GB_LOOPBACK_TIMEOUT_MIN 1
    127: struct gb_loopback *gb = dev_get_drvdata(dev); \
    128: return sprintf(buf, "%u\n", gb->field); \
    137: struct gb_loopback *gb = dev_get_drvdata(dev); \
    139: if (!gb->requests_completed) \
    141: return sprintf(buf, "%" #type "\n", gb->name.field); \
    151: struct gb_loopback *gb; \
    154: gb = dev_get_drvdata(dev); \
    155: stats = &gb->name; \
    … (more hits not shown)
|
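The loopback.c hits above come from macros that stamp out sysfs ``show`` callbacks: each callback fetches the per-device state with ``dev_get_drvdata()`` and prints a single field. Below is a minimal sketch of that pattern with a hypothetical ``example_state`` structure and attribute name, not the actual Greybus loopback code; it is a fragment meant to show the shape of the callback, so the attribute would still need to be wired into the device's attribute group.

```c
/* Sketch of the dev_get_drvdata() + show-callback pattern seen in the
 * loopback.c macros above. Struct, field and attribute names here are
 * illustrative, not the Greybus definitions. */
#include <linux/device.h>
#include <linux/sysfs.h>

struct example_state {
	u32 iteration_count;	/* hypothetical per-device counter */
};

static ssize_t iteration_count_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct example_state *st = dev_get_drvdata(dev);

	/* sysfs_emit() is the modern equivalent of the sprintf() in the hits */
	return sysfs_emit(buf, "%u\n", st->iteration_count);
}
static DEVICE_ATTR_RO(iteration_count);
```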
| audio_topology.c |
    133: static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb,
    142: strings = devm_kcalloc(gb->dev, items, sizeof(char *), GFP_KERNEL);
    192: if (uinfo->value.enumerated.item > max - 1)
    193: uinfo->value.enumerated.item = max - 1;
    218: struct gbaudio_codec_info *gb = snd_soc_component_get_drvdata(comp);
    222: module = find_gb_module(gb, kcontrol->id.name);
    252: ucontrol->value.integer.value[1] =
    253: le32_to_cpu(gbvalue.value.integer_value[1]);
    259: ucontrol->value.enumerated.item[1] =
    260: le32_to_cpu(gbvalue.value.enumerated_item[1]);
    … (more hits not shown)
|
| /linux/drivers/net/ethernet/intel/ice/ |
| ice_ethtool.h |
    72: [1] = ICE_PHY_TYPE(100MB, 100baseT_Full),
    81: [10] = ICE_PHY_TYPE(5GB, 5000baseT_Full),
    82: [11] = ICE_PHY_TYPE(5GB, 5000baseT_Full),
    83: [12] = ICE_PHY_TYPE(10GB, 10000baseT_Full),
    84: [13] = ICE_PHY_TYPE(10GB, 10000baseCR_Full),
    85: [14] = ICE_PHY_TYPE(10GB, 10000baseSR_Full),
    86: [15] = ICE_PHY_TYPE(10GB, 10000baseLR_Full),
    87: [16] = ICE_PHY_TYPE(10GB, 10000baseKR_Full),
    88: [17] = ICE_PHY_TYPE(10GB, 10000baseCR_Full),
    89: [18] = ICE_PHY_TYPE(10GB, 10000baseKR_Full),
    … (more hits not shown)
|
| /linux/drivers/scsi/qla2xxx/ |
| qla_devtbl.h |
    8: "QLA2340", "133MHz PCI-X to 2Gb FC, Single Channel", /* 0x100 */
    9: "QLA2342", "133MHz PCI-X to 2Gb FC, Dual Channel", /* 0x101 */
    10: "QLA2344", "133MHz PCI-X to 2Gb FC, Quad Channel", /* 0x102 */
    11: "QCP2342", "cPCI to 2Gb FC, Dual Channel", /* 0x103 */
    12: "QSB2340", "SBUS to 2Gb FC, Single Channel", /* 0x104 */
    13: "QSB2342", "SBUS to 2Gb FC, Dual Channel", /* 0x105 */
    14: "QLA2310", "Sun 66MHz PCI-X to 2Gb FC, Single Channel", /* 0x106 */
    15: "QLA2332", "Sun 66MHz PCI-X to 2Gb FC, Single Channel", /* 0x107 */
    16: "QCP2332", "Sun cPCI to 2Gb FC, Dual Channel", /* 0x108 */
    17: "QCP2340", "cPCI to 2Gb FC, Single Channel", /* 0x109 */
    … (more hits not shown)
|
| /linux/drivers/input/joystick/ |
| gf2k.c |
    33: #define GF2K_ID_G09 1
    43: …char gf2k_hat_to_axis[][2] = {{ 0, 0}, { 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, …
    123: while ((gameport_read(gameport) & 1) && t) t--;
    139: #define GB(p,n,s) gf2k_get_bits(data, p, n, s)
    149: data &= (1 << num) - 1;
    161: input_report_abs(dev, gf2k_abs[i], GB(i<<3,8,0) | GB(i+46,1,8) | GB(i+50,1,9));
    164: input_report_abs(dev, gf2k_abs[i], GB(i*9+60,8,0) | GB(i+54,1,9));
    166: t = GB(40,4,0);
    171: t = GB(44,2,0) | GB(32,8,2) | GB(78,2,10);
    174: input_report_key(dev, gf2k_btn_joy[i], (t >> i) & 1);
    … (more hits not shown)
|
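The gf2k.c hits revolve around the ``GB(p,n,s)`` helper macro: it reads ``n`` bits starting at bit position ``p`` of the raw gameport packet and shifts them left by ``s``, so multi-part axis values can be OR-ed together as on lines 161 and 164. The sketch below is a standalone re-implementation of that kind of bit extraction; the function and buffer names are illustrative, not the driver's exact code.

```c
/* Illustrative GB(pos, num, shift)-style helper: take 'num' bits starting
 * at bit 'pos' of a packed little-endian buffer and return them shifted
 * left by 'shift'. Not the exact gf2k.c implementation. */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_bits(const uint8_t *buf, int pos, int num, int shift)
{
	uint32_t data = 0;
	int i;

	/* gather the bits one at a time; slow but obviously correct */
	for (i = 0; i < num; i++)
		data |= (uint32_t)((buf[(pos + i) >> 3] >> ((pos + i) & 7)) & 1) << i;

	return data << shift;
}

int main(void)
{
	const uint8_t packet[8] = { 0xab, 0xcd, 0x12, 0x34, 0, 0, 0, 0 };

	/* e.g. an axis built from an 8-bit low part and a 1-bit high part */
	uint32_t axis = get_bits(packet, 0, 8, 0) | get_bits(packet, 46, 1, 8);

	printf("axis = %u\n", axis);
	return 0;
}
```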
| /linux/drivers/md/dm-vdo/indexer/ |
| geometry.c |
    32: * For a small index with a memory footprint less than 1GB, there are three possible memory
    33: * configurations: 0.25GB, 0.5GB and 0.75GB. The default geometry for each is 1024 index records
    36: the VDO default of a 0.25 GB index, this yields a deduplication window of 256 GB using about 2.5
    37: GB for the persistent storage and 256 MB of RAM.
    39: For a larger index with a memory footprint that is a multiple of 1 GB, the geometry is 1024
    41: chapters for every GB of memory footprint. For a 1 GB volume, this yields a deduplication window
    42: of 1 TB using about 9GB of persistent storage and 1 GB of RAM.
    64: result = vdo_allocate(1, struct index_geometry, "geometry", &geometry);
    80: geometry->chapter_mean_delta = 1 << DEFAULT_CHAPTER_MEAN_DELTA_BITS;
    81: geometry->chapter_payload_bits = bits_per(record_pages_per_chapter - 1);
    … (more hits not shown)
|
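The geometry.c comment quoted above ties index memory footprint to deduplication window. If one assumes each index record identifies one 4 KB data block (the VDO block size), the quoted figures correspond to roughly 64M records for the 0.25 GB configuration (256 GB / 4 KB) and about 256M records per GB of footprint (1 TB / 4 KB). The sketch below just checks that arithmetic; the constant and helper names are made up for illustration, they are not UDS symbols.

```c
/* Back-of-the-envelope check of the geometry.c numbers quoted above.
 * Constant and helper names are illustrative, not UDS code. */
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE	4096ULL		/* VDO data block size */
#define RECORDS_PER_GB	(256ULL << 20)	/* assumed ~256M records per GB of index memory */

static uint64_t dedupe_window_bytes(double index_memory_gb)
{
	uint64_t records = (uint64_t)(index_memory_gb * RECORDS_PER_GB);

	/* every record can identify one 4 KB block of stored data */
	return records * BLOCK_SIZE;
}

int main(void)
{
	/* 0.25 GB index -> 256 GB window; 1 GB index -> 1 TB (1024 GB) window */
	printf("0.25 GB index: %llu GB window\n",
	       (unsigned long long)(dedupe_window_bytes(0.25) >> 30));
	printf("1.00 GB index: %llu GB window\n",
	       (unsigned long long)(dedupe_window_bytes(1.0) >> 30));
	return 0;
}
```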
| /linux/tools/perf/pmu-events/arch/arm64/fujitsu/monaka/ |
| tlb.json |
    62: … "BriefDescription": "This event counts operations that cause a TLB access to the L1I in 1GB page."
    67: …"BriefDescription": "This event counts operations that cause a TLB access to the L1I in 16GB page."
    97: … "BriefDescription": "This event counts operations that cause a TLB access to the L1D in 1GB page."
    102: …"BriefDescription": "This event counts operations that cause a TLB access to the L1D in 16GB page."
    132: … "BriefDescription": "This event counts operations that cause a TLB refill of the L1I in 1GB page."
    137: …"BriefDescription": "This event counts operations that cause a TLB refill of the L1I in 16GB page."
    167: … "BriefDescription": "This event counts operations that cause a TLB refill of the L1D in 1GB page."
    172: …"BriefDescription": "This event counts operations that cause a TLB refill of the L1D in 16GB page."
    202: … "BriefDescription": "This event counts operations that cause a TLB access to the L2I in 1GB page."
    207: …"BriefDescription": "This event counts operations that cause a TLB access to the L2I in 16GB page."
    … (more hits not shown)
|
| /linux/Documentation/admin-guide/cgroup-v1/ |
| hugetlb.rst |
    34: For a system supporting three hugepage sizes (64k, 32M and 1G), the control
    37: hugetlb.1GB.limit_in_bytes
    38: hugetlb.1GB.max_usage_in_bytes
    39: hugetlb.1GB.numa_stat
    40: hugetlb.1GB.usage_in_bytes
    41: hugetlb.1GB.failcnt
    42: hugetlb.1GB.rsvd.limit_in_bytes
    43: hugetlb.1GB.rsvd.max_usage_in_bytes
    44: hugetlb.1GB.rsvd.usage_in_bytes
    45: hugetlb.1GB.rsvd.failcnt
    … (more hits not shown)
|
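The hugetlb.rst hits list the per-size cgroup-v1 control files; a task that maps 1 GB huge pages is what gets charged against hugetlb.1GB.usage_in_bytes and limited by hugetlb.1GB.limit_in_bytes. Below is a hedged userspace sketch of creating such a mapping. It assumes the system has 1 GB huge pages configured (for example via /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages) and keeps error handling minimal.

```c
/* Map one anonymous 1 GB huge page; with cgroup-v1 hugetlb accounting
 * enabled, this shows up under the hugetlb.1GB.* files of the task's
 * cgroup. Assumes 1 GB pages are available on the system. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)	/* log2(1 GB) encoded in the flags */
#endif

int main(void)
{
	size_t len = 1UL << 30;	/* one 1 GB huge page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB | MAP_HUGE_1GB)");
		return EXIT_FAILURE;
	}

	memset(p, 0, len);	/* faulting the page is what charges usage_in_bytes */
	munmap(p, len);
	return EXIT_SUCCESS;
}
```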
| /linux/Documentation/arch/x86/x86_64/ |
| mm.rst |
    20: from TB to GB and then MB/KB.
    60: ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
    61: ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
    62: ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
    73: ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
    74: ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
    75: ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
    76: ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
    77: …ffffffff80000000 | -2 GB | ffffffff9fffffff | 512 MB | kernel text mapping, mapped to physic…
    143: ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
    … (more hits not shown)
|
| /linux/drivers/staging/greybus/Documentation/firmware/ |
| firmware-management |
    21: ; Firmware Management Bundle (Bundle 1):
    22: [bundle-descriptor 1]
    25: ; (Mandatory) Firmware Management Protocol on CPort 1
    27: bundle = 1
    31: [cport-descriptor 1]
    32: bundle = 1
    37: bundle = 1
    42: bundle = 1
    51: and will be named gb-fw-mgmt-<N>. The number <N> is assigned at runtime.
    56: There can be multiple devices present in /dev/ directory with name gb-fw-mgmt-N
    … (more hits not shown)
|
| /linux/Documentation/arch/riscv/ |
| vm-layout.rst |
    39: …0000000000000000 | 0 | 0000003fffffffff | 256 GB | user-space virtual memory, different …
    42: …0000004000000000 | +256 GB | ffffffbfffffffff | ~16M TB | ... huge, almost 64 bits wide hole of…
    43: … | | | | virtual memory addresses up to the -256 GB
    50: ffffffc4fea00000 | -236 GB | ffffffc4feffffff | 6 MB | fixmap
    51: ffffffc4ff000000 | -236 GB | ffffffc4ffffffff | 16 MB | PCI io
    52: ffffffc500000000 | -236 GB | ffffffc5ffffffff | 4 GB | vmemmap
    53: ffffffc600000000 | -232 GB | ffffffd5ffffffff | 64 GB | vmalloc/ioremap space
    54: … ffffffd600000000 | -168 GB | fffffff5ffffffff | 128 GB | direct mapping of all physical memory
    56: fffffff700000000 | -36 GB | fffffffeffffffff | 32 GB | kasan
    62: ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
    … (more hits not shown)
|
| /linux/fs/hfsplus/ |
| btree.c |
    30: /* 1GB */ 4, 4, 4,
    31: /* 2GB */ 6, 6, 4,
    32: /* 4GB */ 8, 8, 4,
    33: /* 8GB */ 11, 11, 5,
    35: * For volumes 16GB and larger, we want to make sure that a full OS
    57: * series. For Catalog (16GB to 512GB), each term is 8**(1/5) times
    58: * the previous term. For Attributes (16GB to 512GB), each term is
    59: * 4**(1/5) times the previous term. For 1TB to 16TB, each term is
    60: * 2**(1/5) times the previous term.
    62: /* 16GB */ 64, 32, 5,
    … (more hits not shown)
|
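The btree.c comment describes the clump-size columns as geometric series: between 16 GB and 512 GB the Catalog value grows by 8**(1/5) per doubling of volume size (so it is 8x larger at 512 GB than at 16 GB) and the Attributes value grows by 4**(1/5). The sketch below just evaluates those ratios starting from the 16GB row shown above (64 for Catalog, 32 for Attributes); the printed values are computed approximations, not entries copied from the kernel's table.

```c
/* Evaluate the geometric series described in the btree.c comment above.
 * Starting values come from the 16GB row shown in the hits; all later
 * values are approximations, not the kernel's exact table entries. */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double catalog = 64, attributes = 32;	/* the 16GB row */
	unsigned long vol_gb;

	for (vol_gb = 16; vol_gb <= 512; vol_gb *= 2) {
		printf("%4lu GB: catalog ~%3.0f, attributes ~%3.0f\n",
		       vol_gb, catalog, attributes);
		catalog *= pow(8.0, 1.0 / 5.0);		/* 8**(1/5) per doubling */
		attributes *= pow(4.0, 1.0 / 5.0);	/* 4**(1/5) per doubling */
	}
	return 0;
}
```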
| /linux/include/linux/ |
| xxhash.h |
    51: * xxHash 5.4 GB/s 10
    52: * CrapWow 3.2 GB/s 2 Andrew
    53: * MumurHash 3a 2.7 GB/s 10 Austin Appleby
    54: * SpookyHash 2.0 GB/s 10 Bob Jenkins
    55: * SBox 1.4 GB/s 9 Bret Mulvey
    56: * Lookup3 1.2 GB/s 9 Bob Jenkins
    57: * SuperFastHash 1.2 GB/s 1 Paul Hsieh
    58: * CityHash64 1.05 GB/s 10 Pike & Alakuijala
    59: * FNV 0.55 GB/s 5 Fowler, Noll, Vo
    60: * CRC32 0.43 GB/s 9
    … (more hits not shown)
|
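The xxhash.h hits are the speed/quality benchmark table from the header comment. For in-kernel users the same header exposes one-shot hashing helpers such as xxh32() and xxh64(); the minimal module sketch below shows how they might be called from kernel code. The seed and buffer are arbitrary examples, and the module itself is only a demonstration scaffold.

```c
/* Minimal sketch of hashing a buffer with the kernel's xxHash helpers
 * declared in <linux/xxhash.h>; seed and data are arbitrary examples. */
#include <linux/module.h>
#include <linux/xxhash.h>

static int __init xxh_demo_init(void)
{
	static const char data[] = "example payload";
	u64 h64 = xxh64(data, sizeof(data) - 1, 0 /* seed */);
	u32 h32 = xxh32(data, sizeof(data) - 1, 0 /* seed */);

	pr_info("xxh64=%llx xxh32=%x\n", (unsigned long long)h64, h32);
	return 0;
}

static void __exit xxh_demo_exit(void)
{
}

module_init(xxh_demo_init);
module_exit(xxh_demo_exit);
MODULE_DESCRIPTION("xxHash usage sketch");
MODULE_LICENSE("GPL");
```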
| /linux/arch/powerpc/include/asm/book3s/64/ |
| radix-4k.h |
    9: #define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB
    10: #define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
    11: #define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
|
| radix-64k.h |
    9: #define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB
    10: #define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
    11: #define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
|
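The radix-4k.h and radix-64k.h hits encode, per page-table level, how many 8-byte entries a table holds and therefore how much address space it maps (for example 2^9 PMD entries x 2 MB = 1 GB). The sketch below is plain userspace C, not kernel code, and simply reproduces that arithmetic starting from the 2 MB range covered by one PMD entry, which matches the 1 GB / 512 GB / 4 PB figures in the comments.

```c
/* Reproduce the arithmetic behind the RADIX_*_INDEX_SIZE comments above:
 * a table holds (1 << index_size) 8-byte entries, so it occupies
 * (8 << index_size) bytes and multiplies the mapped range by
 * (1 << index_size) at each level. */
#include <stdio.h>

int main(void)
{
	const struct { const char *level; int index_size; } levels[] = {
		{ "PMD", 9 }, { "PUD", 9 }, { "PGD", 13 },
	};
	int coverage_shift = 21;	/* log2(2 MB): range mapped by one PMD entry */

	for (unsigned int i = 0; i < sizeof(levels) / sizeof(levels[0]); i++) {
		unsigned long table_bytes = 8UL << levels[i].index_size;

		coverage_shift += levels[i].index_size;
		/* PMD: 2^30 = 1 GB, PUD: 2^39 = 512 GB, PGD: 2^52 = 4 PB */
		printf("%s: table %lu bytes, maps 2^%d bytes\n",
		       levels[i].level, table_bytes, coverage_shift);
	}
	return 0;
}
```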
| /linux/tools/testing/selftests/kvm/ |
| mmu_stress_test.c |
    107: TEST_ASSERT_EQ(uc.args[1], stage);
    139: /* Stage 1, re-write all of guest memory. */
    140: run_vcpu(vcpu, 1);
    154: TEST_ASSERT(r == -1 && errno == EFAULT,
    220: ~((uint64_t)vm->page_size - 1);
    239: for (i = 0; abs(rendezvoused) != 1; i++) {
    243: abs(rendezvoused) - 1);
    252: atomic_set(&rendezvous, -nr_vcpus - 1);
    254: atomic_set(&rendezvous, nr_vcpus + 1);
    273: * Skip the first 4gb and slot0. slot0 maps <1gb and is used to back
    … (more hits not shown)
|
| /linux/Documentation/devicetree/bindings/net/ |
| keystone-netcp.txt |
    13: includes a 3-port Ethernet switch sub-module capable of 10Gb/s and 1Gb/s rates
    25: NetCP subsystem(10G or 1G)
    40: |-> Ethernet Port 1
    65: 1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications.
    67: - label: Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb.
    69: "ti,netcp-gbe" for 1GbE on NetCP 1.4
    70: "ti,netcp-gbe-5" for 1GbE N NetCP 1.5 (N=5)
    71: "ti,netcp-gbe-9" for 1GbE N NetCP 1.5 (N=9)
    72: "ti,netcp-gbe-2" for 1GbE N NetCP 1.5 (N=2)
    84: index #1 - sgmii port3/4 module registers
    … (more hits not shown)
|
| /linux/tools/perf/Documentation/ |
| perf-bench.txt |
    1: perf-bench(1)
    119: Based on pipe-test-1m.c by Ingo Molnar.
    182: Specify size of memory to copy (default: 1MB).
    183: Available units are B, KB, MB, GB and TB (case insensitive).
    188: Available values are 4KB, 2MB, 1GB (case insensitive).
    193: Available units are B, KB, MB, GB and TB (case insensitive).
    216: Specify size of memory to set (default: 1MB).
    217: Available units are B, KB, MB, GB and TB (case insensitive).
    222: Available values are 4KB, 2MB, 1GB (case insensitive).
    227: Available units are B, KB, MB, GB and TB (case insensitive).
    … (more hits not shown)
|
| perf-iostat.txt |
    1: perf-iostat(1)
    40: 1. List all PCIe root ports (example for 2-S platform):
    54: $ perf iostat -- dd if=/dev/zero of=/dev/nvme0n1 bs=1M oflag=direct
    57: 375083606016 bytes (375 GB, 349 GiB) copied, 215.974 s, 1.7 GB/s
    62: 0000:00 1 0 2 3
    73: $ perf iostat 0000:17,0:3a -- dd if=/dev/zero of=/dev/nvme0n1 bs=1M oflag=direct
    76: 375083606016 bytes (375 GB, 349 GiB) copied, 197.08 s, 1.9 GB/s
    88: linkperf:perf-stat[1]
|
| /linux/Documentation/driver-api/ |
| edac.rst |
    77: A Single-ranked stick has 1 chip-select row of memory. Motherboards
    201: HBM2e (2GB) channel (equivalent to 8 X 2GB ranks). This creates a total
    204: While the UMC is interfacing a 16GB (8high X 2GB DRAM) HBM stack, each UMC
    205: channel is interfacing 2GB of DRAM (represented as rank).
    213: For example: a heterogeneous system with 1 AMD CPU is connected to
    221: - CPU UMCs use 1 channel, In this case UMC = EDAC channel. This follows the
    224: - GPU UMCs use 1 chip select, So UMC = EDAC CSROW.
    237: mc2 |- GPU card[0] => node 0(mc1), node 1(mc2)
    239: mc4 |- GPU card[1] => node 0(mc3), node 1(mc4)
    241: mc6 |- GPU card[2] => node 0(mc5), node 1(mc6)
    … (more hits not shown)
|
| /linux/tools/perf/pmu-events/arch/x86/goldmontplus/ |
| virtual-memory.json |
    3: "BriefDescription": "Page walk completed due to a demand load to a 1GB page",
    4: "Counter": "0,1,2,3",
    7: …es) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page wa…
    13: "Counter": "0,1,2,3",
    22: "Counter": "0,1,2,3",
    31: "Counter": "0,1,2,3",
    39: "BriefDescription": "Page walk completed due to a demand data store to a 1GB page",
    40: "Counter": "0,1,2,3",
    43: …ata stores whose address translations missed in the TLB and were mapped to 1GB pages. The page wa…
    49: "Counter": "0,1,2,3",
    … (more hits not shown)
|
| /linux/drivers/net/ethernet/qlogic/ |
| Kconfig |
    30: tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
    70: tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
    77: tristate "QLogic QED 25/40/100Gb core driver"
    90: bool "QLogic QED 25/40/100Gb SR-IOV support"
    100: tristate "QLogic QED 25/40/100Gb Ethernet NIC"
|
| /linux/Documentation/driver-api/cxl/allocation/ |
| hugepages.rst |
    26: 1GB Huge Pages
    28: CXL capacity onlined in :code:`ZONE_NORMAL` is eligible for 1GB Gigantic Page
    31: CXL capacity onlined in :code:`ZONE_MOVABLE` is not eligible for 1GB Gigantic
|
| /linux/tools/arch/x86/lib/ |
| x86-opcode-map.txt |
    34: # (W=1): this opcode requires XOP.W == 1
    51: 00: ADD Eb,Gb
    53: 02: ADD Gb,Eb
    59: 08: OR Eb,Gb
    61: 0a: OR Gb,Eb
    68: 10: ADC Eb,Gb
    70: 12: ADC Gb,Eb
    76: 18: SBB Eb,Gb
    78: 1a: SBB Gb,Eb
    79: 1b: SBB Gv,Ev
    … (more hits not shown)
|
| /linux/arch/x86/lib/ |
| x86-opcode-map.txt |
    34: # (W=1): this opcode requires XOP.W == 1
    51: 00: ADD Eb,Gb
    53: 02: ADD Gb,Eb
    59: 08: OR Eb,Gb
    61: 0a: OR Gb,Eb
    68: 10: ADC Eb,Gb
    70: 12: ADC Gb,Eb
    76: 18: SBB Eb,Gb
    78: 1a: SBB Gb,Eb
    79: 1b: SBB Gv,Ev
    … (more hits not shown)
|