/linux/tools/perf/pmu-events/metric_test.py
   43  before = '(a + b + c + d) / (2 * e)'
   44  after = before
   45  self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)
   49  before = r'topdown\-fe\-bound / topdown\-slots - 1'
   50  after = before
   51  self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)
   55  before = r'arb@event\=0x81\,umask\=0x1@ + arb@event\=0x84\,umask\=0x1@'
   56  after = before
   57  self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)
   60  before = r'a + 1e12 + b'
   [all …]

/linux/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
   43  __msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
   44  __msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
   45  __msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
   46  __msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
   47  __msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
   48  __msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
   49  __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
  103  __msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
  104  __msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
  105  __msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
   [all …]

/linux/tools/testing/selftests/bpf/progs/verifier_precision.c
   10  __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
   11  __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
   12  __msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
   13  __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")  in bpf_neg()
   30  __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
   31  __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
   32  __msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
   33  __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")  in bpf_end_to_le()
   51  __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
   52  __msg("mark_precise: frame0: regs=r2 stack= before [all …]

/linux/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
   17  __msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
   19  __msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
   23  __msg("frame0: regs=r0 stack= before 5: (bf) r3 = r10")
   24  __msg("frame0: regs=r0 stack= before 4: (25) if r1 > 0x7 goto pc+0")
   57  __msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0")  in __flag()
   59  __msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")  in __flag()
   90  __msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0")  in __flag()
   92  __msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")  in __flag()
  122  __msg("frame0: regs=r0 stack= before 6: (bf) r3 = r10")
  124  __msg("frame0: regs=r0 stack= before 5: (25) if r0 > 0x7 goto pc+0")
   [all …]

/linux/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
  522  __msg("mark_precise: frame0: regs=r2 stack= before 10: (71) r2 = *(u8 *)(r10 -9)")
  523  __msg("mark_precise: frame0: regs= stack=-16 before 9: (bf) r1 = r6")
  524  __msg("mark_precise: frame0: regs= stack=-16 before 8: (73) *(u8 *)(r1 +0) = r2")
  525  __msg("mark_precise: frame0: regs= stack=-16 before 7: (0f) r1 += r2")
  526  __msg("mark_precise: frame0: regs= stack=-16 before 6: (71) r2 = *(u8 *)(r10 -1)")
  527  __msg("mark_precise: frame0: regs= stack=-16 before 5: (bf) r1 = r6")
  528  __msg("mark_precise: frame0: regs= stack=-16 before 4: (7b) *(u64 *)(r10 -16) = r0")
  529  __msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0")
  604  __msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)")
  650  __msg("mark_precise: frame0: regs=r2 stack= before 8: (79) r2 = *(u64 *)(r10 -8)")  in __flag()
   [all …]

/linux/tools/testing/selftests/bpf/verifier/precise.c
   42  mark_precise: frame0: regs=r2 stack= before 25\
   43  mark_precise: frame0: regs=r2 stack= before 24\
   44  mark_precise: frame0: regs=r2 stack= before 23\
   45  mark_precise: frame0: regs=r2 stack= before 22\
   46  mark_precise: frame0: regs=r2 stack= before 20\
   49  mark_precise: frame0: regs=r2,r9 stack= before 19\
   50  mark_precise: frame0: regs=r9 stack= before 18\
   51  mark_precise: frame0: regs=r8,r9 stack= before 17\
   52  mark_precise: frame0: regs=r0,r9 stack= before 15\
   53  mark_precise: frame0: regs=r0,r9 stack= before 14\
   [all …]

/linux/tools/memory-model/Documentation/explanation.txt
   28  20. THE HAPPENS-BEFORE RELATION: hb
   29  21. THE PROPAGATES-BEFORE RELATION: pb
  150  private variables before using them.  All that is beside the point;
  163  instance, P1 might run entirely before P0 begins, in which case r1 and
  164  r2 will both be 0 at the end.  Or P0 might run entirely before P1
  169  store to buf but before the store to flag.  In this case, r1 and r2
  197  Since r1 = 1, P0 must store 1 to flag before P1 loads 1 from
  200  P1 loads from flag before loading from buf, since CPUs execute
  203  P1 must load 0 from buf before P0 stores 1 to it; otherwise r2
  207  P0 stores 1 to buf before storing 1 to flag, since it executes
   [all …]

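The explanation.txt fragments above reason about a two-CPU message-passing pattern built from the variables buf, flag, r1 and r2. As a point of reference only, a minimal userspace sketch of that pattern follows; the thread names, plain (unmarked) accesses and pthread scaffolding are assumptions for illustration, not the document's own litmus test, and real kernel code would use marked accesses and barriers as the memory-model documentation goes on to explain.

	#include <pthread.h>
	#include <stdio.h>

	/* Illustrative sketch (not copied from explanation.txt): P0 publishes
	 * data through buf and then sets flag; P1 reads flag and then buf.
	 * With plain accesses nothing orders the two stores or the two loads,
	 * which is exactly the question the quoted reasoning works through. */
	static int buf, flag;
	static int r1, r2;

	static void *p0(void *arg)
	{
		buf = 1;	/* store to buf ... */
		flag = 1;	/* ... then store to flag */
		return NULL;
	}

	static void *p1(void *arg)
	{
		r1 = flag;	/* load flag ... */
		r2 = buf;	/* ... then load buf */
		return NULL;
	}

	int main(void)
	{
		pthread_t t0, t1;

		pthread_create(&t0, NULL, p0, NULL);
		pthread_create(&t1, NULL, p1, NULL);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);
		printf("r1=%d r2=%d\n", r1, r2);	/* is r1==1 && r2==0 possible? */
		return 0;
	}
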
/linux/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
   40  /* look up before reading, to reduce error */  in BPF_PROG()
   55  struct bpf_perf_event_value *before, diff;  in fexit_update_maps()  local
   58  before = bpf_map_lookup_elem(&fentry_readings, &zero);  in fexit_update_maps()
   60  if (before && before->counter) {  in fexit_update_maps()
   63  diff.counter = after->counter - before->counter;  in fexit_update_maps()
   64  diff.enabled = after->enabled - before->enabled;  in fexit_update_maps()
   65  diff.running = after->running - before->running;  in fexit_update_maps()
   83  /* read all events before updating the maps, to reduce error */  in BPF_PROG()

/linux/tools/testing/selftests/kselftest_harness/harness-selftest.expected
    5  # harness-selftest.c:19:standalone_pass:before
   10  # harness-selftest.c:27:standalone_fail:before
   17  # harness-selftest.c:35:signal_pass:before
   22  # harness-selftest.c:42:signal_fail:before
   29  # harness-selftest.c:62:pass:before
   30  # harness-selftest.c:19:pass:before
   38  # harness-selftest.c:70:fail:before
   46  # harness-selftest.c:77:timeout:before
   52  # harness-selftest.c:96:pass:before

/linux/tools/testing/selftests/kselftest_harness/harness-selftest.c
   19  TH_LOG("before");  in TEST()
   27  TH_LOG("before");  in TEST()
   35  TH_LOG("before");  in TEST_SIGNAL()
   42  TH_LOG("before");  in TEST_SIGNAL()
   62  TH_LOG("before");  in TEST_F()
   70  TH_LOG("before");  in TEST_F()
   77  TH_LOG("before");
   96  TH_LOG("before");  in TEST_F()
  116  TH_LOG("before");  in TEST_F()

/linux/tools/bpf/bpftool/skeleton/profiler.bpf.c
   52  /* look up before reading, to reduce error */  in BPF_PROG()
   79  struct bpf_perf_event_value___local *before, diff;  in fexit_update_maps()  local
   81  before = bpf_map_lookup_elem(&fentry_readings, &id);  in fexit_update_maps()
   83  if (before && before->counter) {  in fexit_update_maps()
   86  diff.counter = after->counter - before->counter;  in fexit_update_maps()
   87  diff.enabled = after->enabled - before->enabled;  in fexit_update_maps()
   88  diff.running = after->running - before->running;  in fexit_update_maps()
  108  /* read all events before updating the maps, to reduce error */  in BPF_PROG()

/linux/Documentation/virt/kvm/x86/cpuid.rst
   11  mask-out some, or even all KVM-related cpuid features before launching
   66  before enabling paravirtualized
   70  before enabling paravirtualized
   78  before enabling paravirtualized
   86  before using paravirtualized
   90  before using the second async
   96  before using extended destination
   99  KVM_FEATURE_HC_MAP_GPA_RANGE      16  guest checks this feature bit before
  103  KVM_FEATURE_MIGRATION_CONTROL     17  guest checks this feature bit before

/linux/tools/testing/selftests/kvm/arm64/host_sve.c
   49  unsigned long before, after;  in do_sve_roundtrip()  local
   59  "	cntp %[before], p0, p0.B\n"  in do_sve_roundtrip()
   62  : [before] "=r" (before),  in do_sve_roundtrip()
   68  if (before != after) {  in do_sve_roundtrip()
   70  before, after);  in do_sve_roundtrip()
   73  before, after);  in do_sve_roundtrip()

/linux/tools/testing/selftests/net/tcp_ao/lib/sock.c
  482  test_cnt test_cmp_counters(struct tcp_counters *before,  in test_cmp_counters()  argument
  487  if (before->cnt > after->cnt) \  in test_cmp_counters()
  489  if (before->cnt != after->cnt) \  in test_cmp_counters()
  496  if (before->ao.nr_keys != after->ao.nr_keys)  in test_cmp_counters()
  501  i = before->ao.nr_keys;  in test_cmp_counters()
  511  struct tcp_counters *before,  in test_assert_counters_sk()  argument
  517  if (before->cnt > after->cnt) { \  in test_assert_counters_sk()
  519  tst_name ?: "", before->cnt, after->cnt); \  in test_assert_counters_sk()
  522  if ((before->cnt != after->cnt) != !!(expected & e_cnt)) { \  in test_assert_counters_sk()
  525  before->cnt, after->cnt); \  in test_assert_counters_sk()
   [all …]

/linux/include/linux/entry-common.h
  106  * enable interrupts before invoking syscall_enter_from_user_mode_work().
  143  * syscall_enter_from_user_mode_work - Check and handle work before invoking
  177  * before invoking a syscall
  264  * function before return. Defaults to NOOP.
  277  * arch_exit_to_user_mode - Architecture specific final work before
  281  * function before return. Defaults to NOOP.
  304  * exit_to_user_mode_loop - do any pending work before leaving to user space
  325  /* Flush pending rcuog wakeup before the last need_resched() check */  in exit_to_user_mode_prepare()
  353  * is not suitable as the last step before returning to userspace. Must be
  356  * The caller has to invoke syscall_exit_to_user_mode_work() before this.
   [all …]

/linux/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c
   26  __naked void uretprobe_regs(struct pt_regs *before, struct pt_regs *after)  in uretprobe_regs()  argument
   98  struct pt_regs before = {}, after = {};  in test_uretprobe_regs_equal()  local
   99  unsigned long *pb = (unsigned long *) &before;  in test_uretprobe_regs_equal()
  113  uretprobe_regs(&before, &after);  in test_uretprobe_regs_equal()
  116  cnt = sizeof(before)/sizeof(*pb);  in test_uretprobe_regs_equal()
  122  * Check register before and after uretprobe_regs_trigger call  in test_uretprobe_regs_equal()
  130  if (!ASSERT_EQ(pb[i], pa[i], "register before-after value check"))  in test_uretprobe_regs_equal()
  180  struct pt_regs before = {}, after = {};  in test_uretprobe_regs_change()  local
  181  unsigned long *pb = (unsigned long *) &before;  in test_uretprobe_regs_change()
  183  unsigned long cnt = sizeof(before)/sizeof(*pb);  in test_uretprobe_regs_change()
   [all …]

/linux/Documentation/filesystems/iomap/operations.rst
  180  The pagecache takes whatever locks it needs before calling the
  191  before calling this function.
  201  exclusive mode before calling this function.
  234  mode before calling this function.
  245  mode before calling this function.
  258  mode before calling this function.
  269  The pagecache will lock each folio before trying to schedule it for
  316  transactions from process context before submitting the bio.
  392  extra work before or after the I/O is issued to storage.
  439  before completing the call.
   [all …]

/linux/Documentation/nvdimm/maintainer-entry-profile.rst
   25  before submitting, but it is not required.
   32  Those tests need to be passed before the patches go upstream, but not
   33  necessarily before initial posting. Contact the list if you need help
   38  Before patches enabling a new _DSM family will be considered, it must
   49  next merge window they should be sent before -rc4, and ideally
   57  In general, please wait up to one week before pinging for feedback. A

/linux/net/netfilter/ipset/ip_set_list_set.c
   36  int before;  member
  203  if (d->before == 0) {  in list_set_utest()
  206  } else if (d->before > 0) {  in list_set_utest()
  252  else if (d->before == 0 || e->id != d->refid)  in list_set_uadd()
  254  else if (d->before > 0)  in list_set_uadd()
  260  /* If before/after is used on an empty set */  in list_set_uadd()
  261  if ((d->before > 0 && !next) ||  in list_set_uadd()
  262  (d->before < 0 && !prev))  in list_set_uadd()
  278  if (d->before == 0) {  in list_set_uadd()
  282  } else if (d->before > 0) {  in list_set_uadd()
   [all …]

/linux/arch/mips/include/asm/octeon/cvmx-fau.h
   57  * bit will be set. Otherwise the value of the register before
   67  * bit will be set. Otherwise the value of the register before
   77  * bit will be set. Otherwise the value of the register before
   87  * bit will be set. Otherwise the value of the register before
   98  * register before the update will be returned.
  168  * Returns Value of the register before the update
  183  * Returns Value of the register before the update
  198  * Returns Value of the register before the update
  212  * Returns Value of the register before the update
  229  * the value of the register before the update will be
   [all …]

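The cvmx-fau.h comments above all describe the same fetch-and-add contract: the hardware applies the update and hands back the register's value as it was before the update. The Octeon cvmx_fau_* API is not reproduced here; as a hedged analogy only, plain C11 atomics expose the same "returns the pre-update value" behaviour:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Analogy only, not the cvmx_fau_* API: atomic_fetch_add() returns the
	 * value the object held before the addition, matching the "value of the
	 * register before the update" wording in the comments above. */
	int main(void)
	{
		atomic_long counter = 100;
		long old = atomic_fetch_add(&counter, 5);

		printf("before=%ld after=%ld\n", old, atomic_load(&counter));
		/* prints: before=100 after=105 */
		return 0;
	}
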
/linux/scripts/config
   85  local before="$1"
   90  sed -e "s$SED_DELIM$before$SED_DELIM$after$SED_DELIM" "$infile" >"$tmpfile"
  106  local name=$1 new=$2 before=$3
  109  before_re="^($before=|# $before is not set)"
  110  if test -n "$before" && grep -Eq "$before_re" "$FN"; then
  111  txt_append "^$before=" "$new" "$FN"
  112  txt_append "^# $before is not set" "$new" "$FN"

/linux/arch/powerpc/lib/rheap.c
  126  /* XXX: You should have called assure_empty before */  in get_slot()
  154  rh_block_t *before;  in attach_free_block()  local
  166  /* Find the blocks immediately before and after the given one  in attach_free_block()
  168  before = NULL;  in attach_free_block()
  182  before = blk;  in attach_free_block()
  188  if (before != NULL && after != NULL)  in attach_free_block()
  193  if (before && s != (before->start + before->size))  in attach_free_block()
  194  before = NULL;  in attach_free_block()
  200  if (before == NULL && after == NULL) {  in attach_free_block()
  213  /* Grow the before block */  in attach_free_block()
   [all …]

/linux/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
   29  static uint64_t before, after;  variable
   44  before = mfspr(SPRN_MMCR0);  in syscall_ebb_callee()
   50  if (before != after)  in syscall_ebb_callee()
   88  printf("Saw MMCR0 before 0x%lx after 0x%lx\n", before, after);  in test_body()

/linux/kernel/sched/membarrier.c
   13  * barrier before sending the IPI
   20  * order to enforce the guarantee that any writes occurring on CPU0 before
   39  * so it's possible to have "r1 = x" reordered before "y = 1" at any
   46  * before the IPI-induced memory barrier on CPU1.
   48  * B) Userspace thread execution before IPI vs membarrier's memory
   56  * order to enforce the guarantee that any writes occurring on CPU1 before
   76  * before (b) (although not before (a)), so we get "r1 = 0". This violates
  177  * ensure that memory on remote CPUs that occur before the IPI  in ipi_sync_core()
  194  * to the current task before the current task resumes. We could  in ipi_rseq()
  215  * before registration.  in ipi_sync_rq_state()
   [all …]

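membarrier.c is the kernel side of the membarrier(2) system call, and the quoted comments are the ordering argument behind the guarantee it gives callers: once the call returns, every relevant running thread has gone through a full memory barrier. A minimal, hedged userspace sketch of invoking it is shown below; it uses the raw syscall() interface and the command constants from <linux/membarrier.h>, and is only an illustration of the call, not of the kernel-side IPI logic the comments describe.

	#include <linux/membarrier.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>

	/* Hedged sketch: query the supported membarrier commands, then issue a
	 * global barrier. MEMBARRIER_CMD_QUERY returns a bitmask of supported
	 * commands; MEMBARRIER_CMD_GLOBAL forces a full memory barrier on all
	 * running threads, which is the guarantee the quoted comments justify. */
	static long sys_membarrier(int cmd, unsigned int flags, int cpu_id)
	{
		return syscall(__NR_membarrier, cmd, flags, cpu_id);
	}

	int main(void)
	{
		long supported = sys_membarrier(MEMBARRIER_CMD_QUERY, 0, 0);

		if (supported < 0 || !(supported & MEMBARRIER_CMD_GLOBAL)) {
			fprintf(stderr, "MEMBARRIER_CMD_GLOBAL not supported\n");
			return 1;
		}
		return sys_membarrier(MEMBARRIER_CMD_GLOBAL, 0, 0) ? 1 : 0;
	}
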
/linux/Documentation/ABI/obsolete/sysfs-driver-hid-roccat-ryos
   27  Before reading this file, control has to be written to select
   37  Before reading this file, control has to be written to select
   47  Before reading this file, control has to be written to select
   57  Before reading this file, control has to be written to select
   67  Before reading this file, control has to be written to select
   77  Before reading this file, control has to be written to select
   88  Before reading this file, control has to be written to select
   99  Before reading this file, control has to be written to select
  110  Before reading this file, control has to be written to select
  156  Before reading this file, control has to be written to select
   [all …]