// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>

#ifdef __x86_64__

#include <unistd.h>
#include <asm/ptrace.h>
#include <linux/compiler.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <sys/prctl.h>
#include <asm/prctl.h>
#include "uprobe_syscall.skel.h"
#include "uprobe_syscall_executed.skel.h"
#include "bpf/libbpf_internal.h"

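/*
 * Make USDT probe points emit a 5-byte nop instead of the default
 * 1-byte nop, so the uprobes attached below can be optimized (the
 * optimization rewrites a nop5 into a call, see check_attach()).
 */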
#define USDT_NOP .byte 0x0f, 0x1f, 0x44, 0x00, 0x00
#include "usdt.h"

#pragma GCC diagnostic ignored "-Wattributes"

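/*
 * Probed function: starts with a nop5 that the kernel can rewrite into
 * a call into the uprobes trampoline once the uprobe is optimized, and
 * returns the 0xdeadbeef marker value checked below.
 */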
__attribute__((aligned(16)))
__nocf_check __weak __naked unsigned long uprobe_regs_trigger(void)
{
	asm volatile (
	".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n" /* nop5 */
	"movq $0xdeadbeef, %rax\n"
	"ret\n"
	);
}

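/*
 * Store the current register state into *before, call
 * uprobe_regs_trigger() to fire the probe, then store the state again
 * into *after. The store offsets follow the x86-64 struct pt_regs
 * layout; orig_rax/rip/cs/ss are zeroed since they cannot be sampled
 * meaningfully from here.
 */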
__naked void uprobe_regs(struct pt_regs *before, struct pt_regs *after)
{
	asm volatile (
	"movq %r15, 0(%rdi)\n"
	"movq %r14, 8(%rdi)\n"
	"movq %r13, 16(%rdi)\n"
	"movq %r12, 24(%rdi)\n"
	"movq %rbp, 32(%rdi)\n"
	"movq %rbx, 40(%rdi)\n"
	"movq %r11, 48(%rdi)\n"
	"movq %r10, 56(%rdi)\n"
	"movq %r9, 64(%rdi)\n"
	"movq %r8, 72(%rdi)\n"
	"movq %rax, 80(%rdi)\n"
	"movq %rcx, 88(%rdi)\n"
	"movq %rdx, 96(%rdi)\n"
	"movq %rsi, 104(%rdi)\n"
	"movq %rdi, 112(%rdi)\n"
	"movq $0, 120(%rdi)\n" /* orig_rax */
	"movq $0, 128(%rdi)\n" /* rip */
	"movq $0, 136(%rdi)\n" /* cs */
	"pushq %rax\n"
	"pushf\n"
	"pop %rax\n"
	"movq %rax, 144(%rdi)\n" /* eflags */
	"pop %rax\n"
	"movq %rsp, 152(%rdi)\n" /* rsp */
	"movq $0, 160(%rdi)\n" /* ss */

	/* save 2nd argument */
	"pushq %rsi\n"
	"call uprobe_regs_trigger\n"

	/* save return value and load 2nd argument pointer to rax */
	"pushq %rax\n"
	"movq 8(%rsp), %rax\n"

	"movq %r15, 0(%rax)\n"
	"movq %r14, 8(%rax)\n"
	"movq %r13, 16(%rax)\n"
	"movq %r12, 24(%rax)\n"
	"movq %rbp, 32(%rax)\n"
	"movq %rbx, 40(%rax)\n"
	"movq %r11, 48(%rax)\n"
	"movq %r10, 56(%rax)\n"
	"movq %r9, 64(%rax)\n"
	"movq %r8, 72(%rax)\n"
	"movq %rcx, 88(%rax)\n"
	"movq %rdx, 96(%rax)\n"
	"movq %rsi, 104(%rax)\n"
	"movq %rdi, 112(%rax)\n"
	"movq $0, 120(%rax)\n" /* orig_rax */
	"movq $0, 128(%rax)\n" /* rip */
	"movq $0, 136(%rax)\n" /* cs */

	/* restore return value and 2nd argument */
	"pop %rax\n"
	"pop %rsi\n"

	"movq %rax, 80(%rsi)\n"

	"pushf\n"
	"pop %rax\n"

	"movq %rax, 144(%rsi)\n" /* eflags */
	"movq %rsp, 152(%rsi)\n" /* rsp */
	"movq $0, 160(%rsi)\n" /* ss */
	"ret\n"
	);
}

static void test_uprobe_regs_equal(bool retprobe)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.retprobe = retprobe,
	);
	struct uprobe_syscall *skel = NULL;
	struct pt_regs before = {}, after = {};
	unsigned long *pb = (unsigned long *) &before;
	unsigned long *pa = (unsigned long *) &after;
	unsigned long *pp;
	unsigned long offset;
	unsigned int i, cnt;

	offset = get_uprobe_offset(&uprobe_regs_trigger);
	if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
		return;

	skel = uprobe_syscall__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_syscall__open_and_load"))
		goto cleanup;

	skel->links.probe = bpf_program__attach_uprobe_opts(skel->progs.probe,
				0, "/proc/self/exe", offset, &opts);
	if (!ASSERT_OK_PTR(skel->links.probe, "bpf_program__attach_uprobe_opts"))
		goto cleanup;

	/* make sure the uprobe gets optimized */
	if (!retprobe)
		uprobe_regs_trigger();

	uprobe_regs(&before, &after);

	pp = (unsigned long *) &skel->bss->regs;
	cnt = sizeof(before)/sizeof(*pb);

	for (i = 0; i < cnt; i++) {
		unsigned int offset = i * sizeof(unsigned long);

		/*
		 * Compare the register values before and after the
		 * uprobe_regs_trigger call that fires the probe.
		 */
		switch (offset) {
		case offsetof(struct pt_regs, rax):
			ASSERT_EQ(pa[i], 0xdeadbeef, "return value");
			break;
		default:
			if (!ASSERT_EQ(pb[i], pa[i], "register before-after value check"))
				fprintf(stdout, "failed register offset %u\n", offset);
		}

		/*
		 * Compare the register values seen by the bpf program with
		 * the values after the uprobe_regs_trigger call (except rax,
		 * checked below).
		 */
		switch (offset) {
		/*
		 * These values will be different (not set in uprobe_regs),
		 * we don't care.
		 */
		case offsetof(struct pt_regs, orig_rax):
		case offsetof(struct pt_regs, rip):
		case offsetof(struct pt_regs, cs):
		case offsetof(struct pt_regs, rsp):
		case offsetof(struct pt_regs, ss):
			break;
		/*
		 * The uprobe does not see the return value in rax, it needs
		 * to see the original (before) rax value.
		 */
		case offsetof(struct pt_regs, rax):
			if (!retprobe) {
				ASSERT_EQ(pp[i], pb[i], "uprobe rax prog-before value check");
				break;
			}
			/* fallthrough: the uretprobe does see the return value */
		default:
			if (!ASSERT_EQ(pp[i], pa[i], "register prog-after value check"))
				fprintf(stdout, "failed register offset %u\n", offset);
		}
	}

cleanup:
	uprobe_syscall__destroy(skel);
}

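/*
 * bpf_testmod sysfs interface: writing an offset is expected to
 * register a test uprobe at that offset in the writer's executable,
 * and writing 0 to unregister it. Its handler changes rax/rcx/r11,
 * which test_regs_change() below verifies.
 */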
#define BPF_TESTMOD_UPROBE_TEST_FILE "/sys/kernel/bpf_testmod_uprobe"

static int write_bpf_testmod_uprobe(unsigned long offset)
{
	size_t n, ret;
	char buf[30];
	int fd;

	n = sprintf(buf, "%lu", offset);

	fd = open(BPF_TESTMOD_UPROBE_TEST_FILE, O_WRONLY);
	if (fd < 0)
		return -errno;

	ret = write(fd, buf, n);
	close(fd);
	return ret != n ? (int) ret : 0;
}

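/*
 * Verify that register changes done by the testmod uprobe handler are
 * visible after the probed function returns, and that all other
 * registers are preserved.
 */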
static void test_regs_change(void)
{
	struct pt_regs before = {}, after = {};
	unsigned long *pb = (unsigned long *) &before;
	unsigned long *pa = (unsigned long *) &after;
	unsigned long cnt = sizeof(before)/sizeof(*pb);
	unsigned int i, err, offset;

	offset = get_uprobe_offset(uprobe_regs_trigger);

	err = write_bpf_testmod_uprobe(offset);
	if (!ASSERT_OK(err, "register_uprobe"))
		return;

	/* make sure the uprobe gets optimized */
	uprobe_regs_trigger();

	uprobe_regs(&before, &after);

	err = write_bpf_testmod_uprobe(0);
	if (!ASSERT_OK(err, "unregister_uprobe"))
		return;

	for (i = 0; i < cnt; i++) {
		unsigned int offset = i * sizeof(unsigned long);

		switch (offset) {
		case offsetof(struct pt_regs, rax):
			ASSERT_EQ(pa[i], 0x12345678deadbeef, "rax");
			break;
		case offsetof(struct pt_regs, rcx):
			ASSERT_EQ(pa[i], 0x87654321feebdaed, "rcx");
			break;
		case offsetof(struct pt_regs, r11):
			ASSERT_EQ(pa[i], (__u64) -1, "r11");
			break;
		default:
			if (!ASSERT_EQ(pa[i], pb[i], "register before-after value check"))
				fprintf(stdout, "failed register offset %u\n", offset);
		}
	}
}

#ifndef __NR_uretprobe
#define __NR_uretprobe 335
#endif

__naked unsigned long uretprobe_syscall_call_1(void)
{
	/*
	 * Pretend we are the uretprobe trampoline to trigger the return
	 * probe invocation in order to verify we get SIGILL.
	 */
	asm volatile (
	"pushq %rax\n"
	"pushq %rcx\n"
	"pushq %r11\n"
	"movq $" __stringify(__NR_uretprobe) ", %rax\n"
	"syscall\n"
	"popq %r11\n"
	"popq %rcx\n"
	"retq\n"
	);
}

__naked unsigned long uretprobe_syscall_call(void)
{
	asm volatile (
	"call uretprobe_syscall_call_1\n"
	"retq\n"
	);
}

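/*
 * Fork a child that invokes the uretprobe syscall from a fake
 * trampoline and verify that it is killed with SIGILL and that the
 * attached uretprobe program never runs.
 */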
static void test_uretprobe_syscall_call(void)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.retprobe = true,
	);
	struct uprobe_syscall_executed *skel;
	int pid, status, err, go[2], c = 0;
	struct bpf_link *link;

	if (!ASSERT_OK(pipe(go), "pipe"))
		return;

	skel = uprobe_syscall_executed__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
		goto cleanup;

	pid = fork();
	if (!ASSERT_GE(pid, 0, "fork"))
		goto cleanup;

	/* child */
	if (pid == 0) {
		close(go[1]);

		/* wait for parent's kick */
		err = read(go[0], &c, 1);
		if (err != 1)
			exit(-1);

		uretprobe_syscall_call();
		_exit(0);
	}

	skel->bss->pid = pid;

	link = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_multi,
						pid, "/proc/self/exe",
						"uretprobe_syscall_call", &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
		goto cleanup;
	skel->links.test_uretprobe_multi = link;

	/* kick the child */
	write(go[1], &c, 1);
	err = waitpid(pid, &status, 0);
	ASSERT_EQ(err, pid, "waitpid");

	/* verify the child got killed with SIGILL */
	ASSERT_EQ(WIFSIGNALED(status), 1, "WIFSIGNALED");
	ASSERT_EQ(WTERMSIG(status), SIGILL, "WTERMSIG");

	/* verify the uretprobe program wasn't called */
	ASSERT_EQ(skel->bss->executed, 0, "executed");

cleanup:
	uprobe_syscall_executed__destroy(skel);
	close(go[1]);
	close(go[0]);
}

#define TRAMP "[uprobes-trampoline]"

__attribute__((aligned(16)))
__nocf_check __weak __naked void uprobe_test(void)
{
	asm volatile (" \n"
	".byte 0x0f, 0x1f, 0x44, 0x00, 0x00 \n"
	"ret \n"
	);
}

__attribute__((aligned(16)))
__nocf_check __weak void usdt_test(void)
{
	USDT(optimized_uprobe, usdt);
}

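/*
 * Scan /proc/self/maps for a private r-x "[uprobes-trampoline]"
 * mapping that starts at tramp_addr; returns 0 if found, -1 otherwise.
 */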
static int find_uprobes_trampoline(void *tramp_addr)
{
	void *start, *end;
	char line[128];
	int ret = -1;
	FILE *maps;

	maps = fopen("/proc/self/maps", "r");
	if (!maps) {
		fprintf(stderr, "cannot open maps\n");
		return -1;
	}

	while (fgets(line, sizeof(line), maps)) {
		int m = -1;

		/* We care only about private r-x mappings. */
		if (sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n", &start, &end, &m) != 2)
			continue;
		if (m < 0)
			continue;
		if (!strncmp(&line[m], TRAMP, sizeof(TRAMP)-1) && (start == tramp_addr)) {
			ret = 0;
			break;
		}
	}

	fclose(maps);
	return ret;
}

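/* The same 5-byte nop that the USDT_NOP override above expands to. */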
static unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

static void *find_nop5(void *fn)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (!memcmp(nop5, fn + i, 5))
			return fn + i;
	}
	return NULL;
}

typedef void (__attribute__((nocf_check)) *trigger_t)(void);

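/*
 * Trigger the probed function twice and verify that the nop5 at addr
 * was rewritten into a relative call (opcode 0xe8) targeting the
 * uprobes trampoline. Returns the decoded trampoline address.
 */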
static void *check_attach(struct uprobe_syscall_executed *skel, trigger_t trigger,
			  void *addr, int executed)
{
	struct __arch_relative_insn {
		__u8 op;
		__s32 raddr;
	} __packed *call;
	void *tramp = NULL;

	/* The uprobe gets optimized after the first trigger, so hit it twice. */
	trigger();
	trigger();

	/* Make sure the bpf program got executed.. */
	ASSERT_EQ(skel->bss->executed, executed, "executed");

	/* .. and check that the trampoline is as expected. */
	call = (struct __arch_relative_insn *) addr;
	tramp = (void *) (call + 1) + call->raddr;
	ASSERT_EQ(call->op, 0xe8, "call");
	ASSERT_OK(find_uprobes_trampoline(tramp), "uprobes_trampoline");

	return tramp;
}

static void check_detach(void *addr, void *tramp)
{
	/* The [uprobes-trampoline] mapping stays around after detach. */
	ASSERT_OK(find_uprobes_trampoline(tramp), "uprobes_trampoline");
	ASSERT_OK(memcmp(addr, nop5, 5), "nop5");
}

static void check(struct uprobe_syscall_executed *skel, struct bpf_link *link,
		  trigger_t trigger, void *addr, int executed)
{
	void *tramp;

	tramp = check_attach(skel, trigger, addr, executed);
	bpf_link__destroy(link);
	check_detach(addr, tramp);
}

static void test_uprobe_legacy(void)
{
	struct uprobe_syscall_executed *skel = NULL;
	LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.retprobe = true,
	);
	struct bpf_link *link;
	unsigned long offset;

	offset = get_uprobe_offset(&uprobe_test);
	if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
		goto cleanup;

	/* uprobe */
	skel = uprobe_syscall_executed__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
		return;

	skel->bss->pid = getpid();

	link = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe,
				0, "/proc/self/exe", offset, NULL);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts"))
		goto cleanup;

	check(skel, link, uprobe_test, uprobe_test, 2);

	/* uretprobe */
	skel->bss->executed = 0;

	link = bpf_program__attach_uprobe_opts(skel->progs.test_uretprobe,
				0, "/proc/self/exe", offset, &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_opts"))
		goto cleanup;

	check(skel, link, uprobe_test, uprobe_test, 2);

cleanup:
	uprobe_syscall_executed__destroy(skel);
}

static void test_uprobe_multi(void)
{
	struct uprobe_syscall_executed *skel = NULL;
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	struct bpf_link *link;
	unsigned long offset;

	offset = get_uprobe_offset(&uprobe_test);
	if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
		goto cleanup;

	opts.offsets = &offset;
	opts.cnt = 1;

	skel = uprobe_syscall_executed__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
		return;

	skel->bss->pid = getpid();

	/* uprobe.multi */
	link = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_multi,
				0, "/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	check(skel, link, uprobe_test, uprobe_test, 2);

	/* uretprobe.multi */
	skel->bss->executed = 0;
	opts.retprobe = true;
	link = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_multi,
				0, "/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	check(skel, link, uprobe_test, uprobe_test, 2);

cleanup:
	uprobe_syscall_executed__destroy(skel);
}

static void test_uprobe_session(void)
{
	struct uprobe_syscall_executed *skel = NULL;
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.session = true,
	);
	struct bpf_link *link;
	unsigned long offset;

	offset = get_uprobe_offset(&uprobe_test);
	if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
		goto cleanup;

	opts.offsets = &offset;
	opts.cnt = 1;

	skel = uprobe_syscall_executed__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
		return;

	skel->bss->pid = getpid();

	link = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_session,
				0, "/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	/* The session program runs on both entry and return, hence 4 hits. */
	check(skel, link, uprobe_test, uprobe_test, 4);

cleanup:
	uprobe_syscall_executed__destroy(skel);
}

static void test_uprobe_usdt(void)
{
	struct uprobe_syscall_executed *skel;
	struct bpf_link *link;
	void *addr;

	errno = 0;
	addr = find_nop5(usdt_test);
	if (!ASSERT_OK_PTR(addr, "find_nop5"))
		return;

	skel = uprobe_syscall_executed__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
		return;

	skel->bss->pid = getpid();

	link = bpf_program__attach_usdt(skel->progs.test_usdt,
					-1 /* all PIDs */, "/proc/self/exe",
					"optimized_uprobe", "usdt", NULL);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_usdt"))
		goto cleanup;

	check(skel, link, usdt_test, addr, 2);

cleanup:
	uprobe_syscall_executed__destroy(skel);
}

/*
 * Borrowed from tools/testing/selftests/x86/test_shadow_stack.c.
 *
 * For use in inline enablement of shadow stack.
 *
 * The program can't return from the point where shadow stack gets enabled
 * because there will be no address on the shadow stack. So it can't use
 * syscall() for enablement, since it is a function.
 *
 * Based on code from nolibc.h. Keep a copy here because this can't pull
 * in all of nolibc.h.
 */
#define ARCH_PRCTL(arg1, arg2)					\
({								\
	long _ret;						\
	register long _num  asm("eax") = __NR_arch_prctl;	\
	register long _arg1 asm("rdi") = (long)(arg1);		\
	register long _arg2 asm("rsi") = (long)(arg2);		\
								\
	asm volatile (						\
		"syscall\n"					\
		: "=a"(_ret)					\
		: "r"(_arg1), "r"(_arg2),			\
		  "0"(_num)					\
		: "rcx", "r11", "memory", "cc"			\
	);							\
	_ret;							\
})

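/* Fallback definitions for toolchains with pre-shadow-stack headers. */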
#ifndef ARCH_SHSTK_ENABLE
#define ARCH_SHSTK_ENABLE	0x5001
#define ARCH_SHSTK_DISABLE	0x5002
#define ARCH_SHSTK_SHSTK	(1ULL << 0)
#endif

static void test_uretprobe_shadow_stack(void)
{
	if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
		test__skip();
		return;
	}

	/* Run all the tests with shadow stack in place. */

	test_uprobe_regs_equal(false);
	test_uprobe_regs_equal(true);
	test_uretprobe_syscall_call();

	test_uprobe_legacy();
	test_uprobe_multi();
	test_uprobe_session();
	test_uprobe_usdt();

	test_regs_change();

	ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
}

static volatile bool race_stop;

static USDT_DEFINE_SEMA(race);

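/*
 * Attach/detach vs trigger race: odd threads keep calling the probed
 * function while even threads attach and detach a uprobe on it,
 * exercising the optimize/unoptimize paths. The USDT semaphore
 * (updated via ref_ctr_offset) must end up inactive once all links
 * are destroyed.
 */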
static void *worker_trigger(void *arg)
{
	unsigned long rounds = 0;

	while (!race_stop) {
		uprobe_test();
		rounds++;
	}

	printf("tid %d trigger rounds: %lu\n", gettid(), rounds);
	return NULL;
}

static void *worker_attach(void *arg)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct uprobe_syscall_executed *skel;
	unsigned long rounds = 0, offset;
	const char *sema[2] = {
		__stringify(USDT_SEMA(race)),
		NULL,
	};
	unsigned long *ref;
	int err;

	offset = get_uprobe_offset(&uprobe_test);
	if (!ASSERT_GE(offset, 0, "get_uprobe_offset"))
		return NULL;

	err = elf_resolve_syms_offsets("/proc/self/exe", 1, (const char **) &sema, &ref, STT_OBJECT);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
		return NULL;

	opts.ref_ctr_offset = *ref;

	skel = uprobe_syscall_executed__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load"))
		return NULL;

	skel->bss->pid = getpid();

	while (!race_stop) {
		skel->links.test_uprobe = bpf_program__attach_uprobe_opts(skel->progs.test_uprobe,
					0, "/proc/self/exe", offset, &opts);
		if (!ASSERT_OK_PTR(skel->links.test_uprobe, "bpf_program__attach_uprobe_opts"))
			break;

		bpf_link__destroy(skel->links.test_uprobe);
		skel->links.test_uprobe = NULL;
		rounds++;
	}

	printf("tid %d attach rounds: %lu hits: %d\n", gettid(), rounds, skel->bss->executed);
	uprobe_syscall_executed__destroy(skel);
	free(ref);
	return NULL;
}

static useconds_t race_msec(void)
{
	char *env;

	env = getenv("BPF_SELFTESTS_UPROBE_SYSCALL_RACE_MSEC");
	if (env)
		return atoi(env);

	/* default duration is 500ms */
	return 500;
}

static void test_uprobe_race(void)
{
	int err, i, nr_threads;
	pthread_t *threads;

	nr_threads = libbpf_num_possible_cpus();
	if (!ASSERT_GT(nr_threads, 0, "libbpf_num_possible_cpus"))
		return;
	nr_threads = max(2, nr_threads);

	threads = alloca(sizeof(*threads) * nr_threads);
	if (!ASSERT_OK_PTR(threads, "alloca"))
		return;

	for (i = 0; i < nr_threads; i++) {
		err = pthread_create(&threads[i], NULL, i % 2 ? worker_trigger : worker_attach,
				     NULL);
		if (!ASSERT_OK(err, "pthread_create"))
			goto cleanup;
	}

	usleep(race_msec() * 1000);

cleanup:
	race_stop = true;
	for (nr_threads = i, i = 0; i < nr_threads; i++)
		pthread_join(threads[i], NULL);

	ASSERT_FALSE(USDT_SEMA_IS_ACTIVE(race), "race_semaphore");
}

#ifndef __NR_uprobe
#define __NR_uprobe 336
#endif

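/*
 * The uprobe syscall is only valid when invoked through an optimized
 * uprobe trampoline; a direct call must fail with ENXIO.
 */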
static void test_uprobe_error(void)
{
	long err = syscall(__NR_uprobe);

	ASSERT_EQ(err, -1, "error");
	ASSERT_EQ(errno, ENXIO, "errno");
}

static void __test_uprobe_syscall(void)
{
	if (test__start_subtest("uretprobe_regs_equal"))
		test_uprobe_regs_equal(true);
	if (test__start_subtest("uretprobe_syscall_call"))
		test_uretprobe_syscall_call();
	if (test__start_subtest("uretprobe_shadow_stack"))
		test_uretprobe_shadow_stack();
	if (test__start_subtest("uprobe_legacy"))
		test_uprobe_legacy();
	if (test__start_subtest("uprobe_multi"))
		test_uprobe_multi();
	if (test__start_subtest("uprobe_session"))
		test_uprobe_session();
	if (test__start_subtest("uprobe_usdt"))
		test_uprobe_usdt();
	if (test__start_subtest("uprobe_race"))
		test_uprobe_race();
	if (test__start_subtest("uprobe_error"))
		test_uprobe_error();
	if (test__start_subtest("uprobe_regs_equal"))
		test_uprobe_regs_equal(false);
	if (test__start_subtest("regs_change"))
		test_regs_change();
}
#else
static void __test_uprobe_syscall(void)
{
	test__skip();
}
#endif

void test_uprobe_syscall(void)
{
	__test_uprobe_syscall();
}