| /linux/tools/perf/Documentation/ |
| perf-c2c.txt |
   57  Configure mem-loads latency. Supported on Intel, Arm64 and some AMD
  170  …| Intel   | Default   | -e ldlat-loads | cpu/mem-loads,ldlat=30/P …
  173  …|         | Load only | -e ldlat-loads | cpu/mem-loads,ldlat=30/P …
  177  …| Intel   | Default   | -e ldlat-loads | {cpu/mem-loads-aux/,cpu/mem-loads,ldlat=30/}:P …
  180  …|         | Load only | -e ldlat-loads | {cpu/mem-loads-aux/,cpu/mem-loads,ldlat=30/}:P …
  187  …| PowerPC | Default   | -e ldlat-loads | cpu/mem-loads/ …
  190  …|         | Load only | -e ldlat-loads | cpu/mem-loads/ …
  247  Total loads
|
| /linux/tools/testing/selftests/drivers/net/hw/ |
| devlink_port_split.py |
   60  ports = json.loads(stdout)['port']
   84  values = list(json.loads(stdout)['port'].values())[0]
  102  values = list(json.loads(stdout)['port'].values())[0]
  266  validate_devlink_output(json.loads(stdout))
  267  devs = json.loads(stdout)['dev']
|
| devlink_rate_tc_bw.py |
  160  ports = json.loads(out)["port"]
|
| /linux/kernel/sched/ |
| loadavg.c |
   73  void get_avenrun(unsigned long *loads, unsigned long offset, int shift)  in get_avenrun() argument
   75  loads[0] = (avenrun[0] + offset) << shift;  in get_avenrun()
   76  loads[1] = (avenrun[1] + offset) << shift;  in get_avenrun()
   77  loads[2] = (avenrun[2] + offset) << shift;  in get_avenrun()
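get_avenrun() hands out the scheduler's fixed-point avenrun[] samples, adding offset and applying shift so each caller chooses its own scaling. A minimal consumer sketch, assuming the FIXED_1, LOAD_INT() and LOAD_FRAC() helpers from <linux/sched/loadavg.h> (the same macros the kdb_main.c hits further down use); this mirrors the /proc/loadavg formatting pattern rather than being a verbatim kernel excerpt:

    #include <linux/kernel.h>
    #include <linux/sched/loadavg.h>

    static void print_loadavg_example(void)
    {
            unsigned long avnrun[3];

            /* FIXED_1/200 adds half of 1/100 in fixed point, rounding
             * the 11-bit fractional part to two decimal digits. */
            get_avenrun(avnrun, FIXED_1 / 200, 0);
            pr_info("load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
                    LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
                    LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
                    LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
    }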
|
| /linux/arch/powerpc/perf/ |
| power9-pmu.c |
  174  GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS);
  178  CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
  182  CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
  185  CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
  188  CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
|
| power8-pmu.c |
  134  CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
  139  CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
  143  CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
  149  CACHE_EVENT_ATTR(branch-loads, PM_BRU_FIN);
|
| power10-pmu.c |
  127  GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS);
  134  CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
  138  CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
  141  CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
  146  CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
|
| /linux/arch/alpha/lib/ |
| ev6-copy_user.S |
   64  EXI( ldbu $1,0($17) )   # .. .. .. L : Keep loads separate from stores
  116  EXI ( ldbu $2,0($17) )  # .. .. .. L : No loads in the same quad
  203  EXI ( ldbu $2,0($17) )  # .. .. .. L : No loads in the same quad
|
| /linux/tools/testing/selftests/kvm/x86/ |
| pmu_event_filter_test.c |
   56  uint64_t loads;  member
  423  const uint64_t loads = rdmsr(msr_base + 0);  in masked_events_guest_test() local
  434  pmc_results.loads = rdmsr(msr_base + 0) - loads;  in masked_events_guest_test()
  622  TEST_ASSERT(bool_eq(pmc_results.loads, test->flags & ALLOW_LOADS) &&  in run_masked_events_tests()
  627  test->msg, pmc_results.loads, pmc_results.stores,  in run_masked_events_tests()
|
| /linux/Documentation/arch/x86/ |
| tsx_async_abort.rst |
   13  case certain loads may speculatively pass invalid data to dependent operations
   15  Synchronization Extensions (TSX) transaction. This includes loads with no
   16  fault or assist condition. Such loads may speculatively expose stale data from
|
| /linux/scripts/atomic/kerneldoc/ |
| read |
    6  * Atomically loads the value of @v with ${desc_order} ordering.
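This kerneldoc template is expanded once per ordering variant (atomic_read(), atomic_read_acquire(), and the 64-bit forms). A small sketch contrasting the relaxed and acquire loads, assuming the standard <linux/atomic.h> API:

    #include <linux/atomic.h>

    static atomic_t ready = ATOMIC_INIT(0);

    static int check_ready(void)
    {
            /* Relaxed load: atomic, but no ordering guarantees. */
            int snapshot = atomic_read(&ready);

            /* Acquire load: later accesses cannot be reordered before it. */
            int ordered = atomic_read_acquire(&ready);

            return snapshot | ordered;
    }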
|
| /linux/include/uapi/linux/ |
| sysinfo.h |
   10  __kernel_ulong_t loads[3];  /* 1, 5, and 15 minute load averages */  member
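The same fixed-point values reach userspace through sysinfo(2); loads[] is scaled by 1 << SI_LOAD_SHIFT (16, defined in this header). A minimal decoding sketch, assuming glibc's <sys/sysinfo.h> exposes SI_LOAD_SHIFT as it does on typical Linux systems:

    #include <stdio.h>
    #include <sys/sysinfo.h>

    int main(void)
    {
            struct sysinfo si;

            if (sysinfo(&si) != 0)
                    return 1;

            /* loads[] is fixed point, scaled by 1 << SI_LOAD_SHIFT. */
            printf("load averages: %.2f %.2f %.2f\n",
                   si.loads[0] / (double)(1 << SI_LOAD_SHIFT),
                   si.loads[1] / (double)(1 << SI_LOAD_SHIFT),
                   si.loads[2] / (double)(1 << SI_LOAD_SHIFT));
            return 0;
    }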
|
| /linux/arch/mips/econet/ |
| Platform |
    2  # we put the load address well above where the bootloader loads and then use
|
| /linux/Documentation/ |
| memory-barriers.txt |
  177  perceived by the loads made by another CPU in the same order as the stores were
  246  (*) Overlapping loads and stores within a particular CPU will appear to be
  274  (*) It _must_not_ be assumed that independent loads and stores will be issued
  368  deferral and combination of memory operations; speculative loads; speculative
  387  to have any effect on loads.
  405  case where two loads are performed such that the second depends on the
  412  loads only; it is not required to have any effect on stores, independent
  413  loads or overlapping loads.
  421  that touched by the load will be perceptible to any loads issued after
  438  dependency barriers. Nowadays, APIs for marking loads from shared
  [all …]
|
| /linux/include/linux/sched/ |
| loadavg.h |
   16  extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
|
| /linux/tools/net/ynl/pyynl/ |
| cli.py |
  283  attrs = json.loads(args.json_text)
  337  ops = [ (item[0], json.loads(item[1]), args.flags or []) for item in args.multi ]
|
| /linux/Documentation/gpu/nova/core/ |
| vbios.rst |
   10  the driver loads, as well as by the nova-core driver in the kernel to boot the GSP.
   32  This firmware then loads other firmware ucodes onto the PMU and SEC2
   34  loads (see devinit.rst). The DEVINIT ucode is itself another ucode that is
|
| /linux/tools/memory-model/Documentation/ |
| control-dependencies.txt |
   42  fuse the load from "a" with other loads. Without the WRITE_ONCE(),
  219  (*) Control dependencies can order prior loads against later stores.
  221  Not prior loads against later loads, nor prior stores against
  224  stores and later loads, smp_mb().
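These hits summarize the document's core rule: a branch whose condition comes from a marked load orders that load against stores inside the branch, and nothing more. A minimal sketch of the canonical pattern, using READ_ONCE()/WRITE_ONCE() as the document itself does:

    #include <linux/compiler.h>

    static int a, b;

    static void control_dependency_example(void)
    {
            int q = READ_ONCE(a);   /* marked load: cannot be fused or repeated */

            if (q) {
                    /* This store is control-dependent on the load above,
                     * so the CPU cannot commit it before the load completes.
                     * The dependency orders the prior load against this
                     * later store, but NOT against any later loads. */
                    WRITE_ONCE(b, 1);
            }
    }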
|
| explanation.txt |
   79  for the loads, the model will predict whether it is possible for the
   80  code to run in such a way that the loads will indeed obtain the
  142  shared memory locations and another CPU loads from those locations in
  154  A memory model will predict what values P1 might obtain for its loads
  197  Since r1 = 1, P0 must store 1 to flag before P1 loads 1 from
  198  it, as loads can obtain values only from earlier stores.
  200  P1 loads from flag before loading from buf, since CPUs execute
  223  each CPU stores to its own shared location and then loads from the
  272  X: P1 loads 1 from flag executes before
  273  Y: P1 loads 0 from buf executes before
  [all …]
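The P0/P1 hits walk through the classic message-passing litmus test: one CPU stores to buf and then flag, the other loads flag and then buf. A sketch of the barrier placement that forbids the r1 = 1, r2 = 0 outcome, assuming smp_wmb()/smp_rmb() (release/acquire primitives would work equally well):

    /* Kernel context assumed: READ_ONCE()/WRITE_ONCE() from
     * <linux/compiler.h>, smp_wmb()/smp_rmb() from <asm/barrier.h>. */

    static int buf, flag;

    /* P0: publish the data, then set the flag. */
    static void p0(void)
    {
            WRITE_ONCE(buf, 1);
            smp_wmb();              /* order the two stores */
            WRITE_ONCE(flag, 1);
    }

    /* P1: observe the flag, then read the data. */
    static void p1(int *r1, int *r2)
    {
            *r1 = READ_ONCE(flag);
            smp_rmb();              /* order the two loads */
            *r2 = READ_ONCE(buf);
    }

    /* With both barriers, *r1 == 1 && *r2 == 0 is forbidden. */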
|
| recipes.txt |
   46  tearing, load/store fusing, and invented loads and stores.
  208  and another CPU execute a pair of loads from this same pair of variables,
  315  smp_rmb() macro orders prior loads against later loads. Therefore, if
  358  second, while another CPU loads from the second variable and then stores
  479  that one CPU first stores to one variable and then loads from a second,
  480  while another CPU stores to the second variable and then loads from the
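The last two hits describe the store-buffering pattern: each CPU stores to one variable and then loads from the other, and only a full barrier on both sides rules out both loads observing zero. A sketch, assuming smp_mb() in the same kernel context as above:

    static int x, y;

    static void cpu0(int *r0)
    {
            WRITE_ONCE(x, 1);
            smp_mb();               /* full barrier: store to x vs. load of y */
            *r0 = READ_ONCE(y);
    }

    static void cpu1(int *r1)
    {
            WRITE_ONCE(y, 1);
            smp_mb();               /* full barrier: store to y vs. load of x */
            *r1 = READ_ONCE(x);
    }

    /* With smp_mb() on both sides, *r0 == 0 && *r1 == 0 is forbidden. */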
|
| /linux/Documentation/tee/ |
| amd-tee.rst |
   55  * TEE_CMD_ID_LOAD_TA - loads a Trusted Application (TA) binary into
   72  * open_session - loads the TA binary and opens session with loaded TA.
|
| /linux/kernel/debug/kdb/ |
| kdb_main.c |
  2371  val->loads[0] = avenrun[0];  in kdb_sysinfo()
  2372  val->loads[1] = avenrun[1];  in kdb_sysinfo()
  2373  val->loads[2] = avenrun[2];  in kdb_sysinfo()
  2410  LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),  in kdb_summary()
  2411  LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),  in kdb_summary()
  2412  LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));  in kdb_summary()
|
| /linux/arch/mips/kernel/ |
| mips-r2-to-r6-emul.c |
  1274  MIPS_R2_STATS(loads);  in mipsr2_decoder()
  1348  MIPS_R2_STATS(loads);  in mipsr2_decoder()
  1608  MIPS_R2_STATS(loads);  in mipsr2_decoder()
  1727  MIPS_R2_STATS(loads);  in mipsr2_decoder()
  2267  (unsigned long)__this_cpu_read(mipsr2emustats.loads),  in mipsr2_emul_show()
  2268  (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));  in mipsr2_emul_show()
  2324  __this_cpu_write((mipsr2emustats).loads, 0);  in mipsr2_clear_show()
  2325  __this_cpu_write((mipsr2bdemustats).loads, 0);  in mipsr2_clear_show()
|
| /linux/tools/testing/selftests/net/lib/py/ |
| utils.py |
  209  return _json.loads(cmd_obj.stdout)
  251  one = _json.loads(l)
|
| /linux/arch/mips/include/asm/ |
| mips-r2-to-r6-emul.h |
   22  u64 loads;  member
|