1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <unistd.h>
4 #include <pthread.h>
5 #include <test_progs.h>
6 #include "uprobe_multi.skel.h"
7 #include "uprobe_multi_bench.skel.h"
8 #include "uprobe_multi_usdt.skel.h"
9 #include "uprobe_multi_consumers.skel.h"
10 #include "uprobe_multi_pid_filter.skel.h"
11 #include "bpf/libbpf_internal.h"
12 #include "testing_helpers.h"
13 #include "../sdt.h"
14
/* Buffer whose address is published to the BPF programs via bss->user_ptr. */
static char test_data[] = "test_data";
16
/* Probe target: noinline + asm barrier keep a real symbol for uprobes to hit. */
noinline void uprobe_multi_func_1(void)
{
	asm volatile ("");
}
21
/* Probe target: noinline + asm barrier keep a real symbol for uprobes to hit. */
noinline void uprobe_multi_func_2(void)
{
	asm volatile ("");
}
26
/* Probe target: noinline + asm barrier keep a real symbol for uprobes to hit. */
noinline void uprobe_multi_func_3(void)
{
	asm volatile ("");
}
31
/* Fires the test:pid_filter_usdt USDT probe once. */
noinline void usdt_trigger(void)
{
	STAP_PROBE(test, pid_filter_usdt);
}
36
/* State for one helper task (forked/cloned process, or a pthread). */
struct child {
	int go[2];		/* parent -> child kick channel */
	int c2p[2]; /* child -> parent channel */
	int pid;
	int tid;
	pthread_t thread;	/* set only when the child is a thread */
	char stack[65536];	/* stack buffer for clone()-based children */
};
45
release_child(struct child * child)46 static void release_child(struct child *child)
47 {
48 int child_status;
49
50 if (!child)
51 return;
52 close(child->go[1]);
53 close(child->go[0]);
54 if (child->thread)
55 pthread_join(child->thread, NULL);
56 close(child->c2p[0]);
57 close(child->c2p[1]);
58 if (child->pid > 0)
59 waitpid(child->pid, &child_status, 0);
60 }
61
kick_child(struct child * child)62 static void kick_child(struct child *child)
63 {
64 char c = 1;
65
66 if (child) {
67 write(child->go[1], &c, 1);
68 release_child(child);
69 }
70 fflush(NULL);
71 }
72
/*
 * Entry point for forked/cloned children: wait for the parent's kick,
 * trigger all probed functions once, then exit.
 */
static int child_func(void *arg)
{
	struct child *child = arg;
	int err, c;

	/* close the unused write end of the kick pipe */
	close(child->go[1]);

	/* wait for parent's kick */
	err = read(child->go[0], &c, 1);
	if (err != 1)
		exit(err);

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();
	usdt_trigger();

	exit(errno);
}
92
/*
 * Start a child task that runs child_func().  With clone_vm the child is
 * created via clone() sharing the parent's VM; otherwise it is fork()-ed.
 * Returns 0 on success, -1 with errno set on failure.
 */
static int spawn_child_flag(struct child *child, bool clone_vm)
{
	/* pipe to notify child to execute the trigger functions */
	if (pipe(child->go))
		return -1;

	if (clone_vm) {
		/*
		 * The stack grows down, so hand clone() the top of the
		 * buffer — passing the middle would waste half the stack.
		 */
		child->pid = child->tid = clone(child_func, child->stack + sizeof(child->stack),
						CLONE_VM|SIGCHLD, child);
	} else {
		child->pid = child->tid = fork();
	}
	if (child->pid < 0) {
		release_child(child);
		errno = EINVAL;
		return -1;
	}

	/* fork-ed child executes the trigger body directly */
	if (!clone_vm && child->pid == 0)
		child_func(child);

	return 0;
}
117
/* Spawn a regular fork()-ed child (no shared VM). */
static int spawn_child(struct child *child)
{
	return spawn_child_flag(child, false);
}
122
/*
 * Thread variant of the child: report readiness over c2p, wait for the
 * parent's kick, then trigger all probed functions once.
 */
static void *child_thread(void *ctx)
{
	struct child *child = ctx;
	int c = 0, err;

	/* tid of the thread, pid stays the spawning process's */
	child->tid = syscall(SYS_gettid);

	/* let parent know we are ready */
	err = write(child->c2p[1], &c, 1);
	if (err != 1)
		pthread_exit(&err);

	/* wait for parent's kick */
	err = read(child->go[0], &c, 1);
	if (err != 1)
		pthread_exit(&err);

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();
	usdt_trigger();

	err = 0;
	pthread_exit(&err);
}
148
spawn_thread(struct child * child)149 static int spawn_thread(struct child *child)
150 {
151 int c, err;
152
153 /* pipe to notify child to execute the trigger functions */
154 if (pipe(child->go))
155 return -1;
156 /* pipe to notify parent that child thread is ready */
157 if (pipe(child->c2p)) {
158 close(child->go[0]);
159 close(child->go[1]);
160 return -1;
161 }
162
163 child->pid = getpid();
164
165 err = pthread_create(&child->thread, NULL, child_thread, child);
166 if (err) {
167 err = -errno;
168 close(child->go[0]);
169 close(child->go[1]);
170 close(child->c2p[0]);
171 close(child->c2p[1]);
172 errno = -err;
173 return -1;
174 }
175
176 err = read(child->c2p[0], &c, 1);
177 if (!ASSERT_EQ(err, 1, "child_thread_ready"))
178 return -1;
179
180 return 0;
181 }
182
/*
 * Trigger the probed functions (in the parent and/or the child) and check
 * the counters the BPF programs maintain.  child == NULL means no pid
 * filtering is being tested.
 */
static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
{
	/* expected addresses, verified by the BPF side */
	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;

	skel->bss->user_ptr = test_data;

	/*
	 * Disable pid check in bpf program if we are pid filter test,
	 * because the probe should be executed only by child->pid
	 * passed at the probe attach.
	 */
	skel->bss->pid = child ? 0 : getpid();
	skel->bss->expect_pid = child ? child->pid : 0;

	/* trigger all probes, if we are testing child *process*, just to make
	 * sure that PID filtering doesn't let through activations from wrong
	 * PIDs; when we test child *thread*, we don't want to do this to
	 * avoid double counting number of triggering events
	 */
	if (!child || !child->thread) {
		uprobe_multi_func_1();
		uprobe_multi_func_2();
		uprobe_multi_func_3();
		usdt_trigger();
	}

	if (child)
		kick_child(child);

	/*
	 * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123]
	 * function and each sleepable probe (6) increments uprobe_multi_sleep_result.
	 */
	ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");

	ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");

	if (child) {
		ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
		ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
	}
}
235
test_skel_api(void)236 static void test_skel_api(void)
237 {
238 struct uprobe_multi *skel = NULL;
239 int err;
240
241 skel = uprobe_multi__open_and_load();
242 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
243 goto cleanup;
244
245 err = uprobe_multi__attach(skel);
246 if (!ASSERT_OK(err, "uprobe_multi__attach"))
247 goto cleanup;
248
249 uprobe_multi_test_run(skel, NULL);
250
251 cleanup:
252 uprobe_multi__destroy(skel);
253 }
254
/*
 * Attach all programs via bpf_program__attach_uprobe_multi() (plus the
 * uprobe-backed USDT programs) with an optional pid filter and run the
 * common test body.  child == NULL disables pid filtering.
 */
static void
__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts,
		  struct child *child)
{
	/* pid -1 means "all processes" for the filtered attachments below */
	pid_t pid = child ? child->pid : -1;
	struct uprobe_multi *skel = NULL;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid,
							      binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = true;
	skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid,
								 binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid,
								    binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = true;
	skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep,
								       pid, binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	/* uprobe_extra is deliberately attached without pid filter (-1) */
	opts->retprobe = false;
	skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1,
								    binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	/* Attach (uprobe-backed) USDTs */
	skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
							"test", "pid_filter_usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
		goto cleanup;

	skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
							  "test", "pid_filter_usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
		goto cleanup;

	uprobe_multi_test_run(skel, child);

	ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
	if (child) {
		ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
		ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
	}
cleanup:
	uprobe_multi__destroy(skel);
}
317
318 static void
test_attach_api(const char * binary,const char * pattern,struct bpf_uprobe_multi_opts * opts)319 test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
320 {
321 static struct child child;
322
323 /* no pid filter */
324 __test_attach_api(binary, pattern, opts, NULL);
325
326 /* pid filter */
327 if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
328 return;
329
330 __test_attach_api(binary, pattern, opts, &child);
331
332 /* pid filter (thread) */
333 if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
334 return;
335
336 __test_attach_api(binary, pattern, opts, &child);
337 }
338
test_attach_api_pattern(void)339 static void test_attach_api_pattern(void)
340 {
341 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
342
343 test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts);
344 test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts);
345 }
346
test_attach_api_syms(void)347 static void test_attach_api_syms(void)
348 {
349 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
350 const char *syms[3] = {
351 "uprobe_multi_func_1",
352 "uprobe_multi_func_2",
353 "uprobe_multi_func_3",
354 };
355
356 opts.syms = syms;
357 opts.cnt = ARRAY_SIZE(syms);
358 test_attach_api("/proc/self/exe", NULL, &opts);
359 }
360
test_attach_api_fails(void)361 static void test_attach_api_fails(void)
362 {
363 LIBBPF_OPTS(bpf_link_create_opts, opts);
364 const char *path = "/proc/self/exe";
365 struct uprobe_multi *skel = NULL;
366 int prog_fd, link_fd = -1;
367 unsigned long offset = 0;
368
369 skel = uprobe_multi__open_and_load();
370 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
371 goto cleanup;
372
373 prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
374
375 /* abnormal cnt */
376 opts.uprobe_multi.path = path;
377 opts.uprobe_multi.offsets = &offset;
378 opts.uprobe_multi.cnt = INT_MAX;
379 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
380 if (!ASSERT_ERR(link_fd, "link_fd"))
381 goto cleanup;
382 if (!ASSERT_EQ(link_fd, -E2BIG, "big cnt"))
383 goto cleanup;
384
385 /* cnt is 0 */
386 LIBBPF_OPTS_RESET(opts,
387 .uprobe_multi.path = path,
388 .uprobe_multi.offsets = (unsigned long *) &offset,
389 );
390
391 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
392 if (!ASSERT_ERR(link_fd, "link_fd"))
393 goto cleanup;
394 if (!ASSERT_EQ(link_fd, -EINVAL, "cnt_is_zero"))
395 goto cleanup;
396
397 /* negative offset */
398 offset = -1;
399 opts.uprobe_multi.path = path;
400 opts.uprobe_multi.offsets = (unsigned long *) &offset;
401 opts.uprobe_multi.cnt = 1;
402
403 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
404 if (!ASSERT_ERR(link_fd, "link_fd"))
405 goto cleanup;
406 if (!ASSERT_EQ(link_fd, -EINVAL, "offset_is_negative"))
407 goto cleanup;
408
409 /* offsets is NULL */
410 LIBBPF_OPTS_RESET(opts,
411 .uprobe_multi.path = path,
412 .uprobe_multi.cnt = 1,
413 );
414
415 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
416 if (!ASSERT_ERR(link_fd, "link_fd"))
417 goto cleanup;
418 if (!ASSERT_EQ(link_fd, -EINVAL, "offsets_is_null"))
419 goto cleanup;
420
421 /* wrong offsets pointer */
422 LIBBPF_OPTS_RESET(opts,
423 .uprobe_multi.path = path,
424 .uprobe_multi.offsets = (unsigned long *) 1,
425 .uprobe_multi.cnt = 1,
426 );
427
428 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
429 if (!ASSERT_ERR(link_fd, "link_fd"))
430 goto cleanup;
431 if (!ASSERT_EQ(link_fd, -EFAULT, "offsets_is_wrong"))
432 goto cleanup;
433
434 /* path is NULL */
435 offset = 1;
436 LIBBPF_OPTS_RESET(opts,
437 .uprobe_multi.offsets = (unsigned long *) &offset,
438 .uprobe_multi.cnt = 1,
439 );
440
441 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
442 if (!ASSERT_ERR(link_fd, "link_fd"))
443 goto cleanup;
444 if (!ASSERT_EQ(link_fd, -EINVAL, "path_is_null"))
445 goto cleanup;
446
447 /* wrong path pointer */
448 LIBBPF_OPTS_RESET(opts,
449 .uprobe_multi.path = (const char *) 1,
450 .uprobe_multi.offsets = (unsigned long *) &offset,
451 .uprobe_multi.cnt = 1,
452 );
453
454 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
455 if (!ASSERT_ERR(link_fd, "link_fd"))
456 goto cleanup;
457 if (!ASSERT_EQ(link_fd, -EFAULT, "path_is_wrong"))
458 goto cleanup;
459
460 /* wrong path type */
461 LIBBPF_OPTS_RESET(opts,
462 .uprobe_multi.path = "/",
463 .uprobe_multi.offsets = (unsigned long *) &offset,
464 .uprobe_multi.cnt = 1,
465 );
466
467 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
468 if (!ASSERT_ERR(link_fd, "link_fd"))
469 goto cleanup;
470 if (!ASSERT_EQ(link_fd, -EBADF, "path_is_wrong_type"))
471 goto cleanup;
472
473 /* wrong cookies pointer */
474 LIBBPF_OPTS_RESET(opts,
475 .uprobe_multi.path = path,
476 .uprobe_multi.offsets = (unsigned long *) &offset,
477 .uprobe_multi.cookies = (__u64 *) 1ULL,
478 .uprobe_multi.cnt = 1,
479 );
480
481 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
482 if (!ASSERT_ERR(link_fd, "link_fd"))
483 goto cleanup;
484 if (!ASSERT_EQ(link_fd, -EFAULT, "cookies_is_wrong"))
485 goto cleanup;
486
487 /* wrong ref_ctr_offsets pointer */
488 LIBBPF_OPTS_RESET(opts,
489 .uprobe_multi.path = path,
490 .uprobe_multi.offsets = (unsigned long *) &offset,
491 .uprobe_multi.cookies = (__u64 *) &offset,
492 .uprobe_multi.ref_ctr_offsets = (unsigned long *) 1,
493 .uprobe_multi.cnt = 1,
494 );
495
496 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
497 if (!ASSERT_ERR(link_fd, "link_fd"))
498 goto cleanup;
499 if (!ASSERT_EQ(link_fd, -EFAULT, "ref_ctr_offsets_is_wrong"))
500 goto cleanup;
501
502 /* wrong flags */
503 LIBBPF_OPTS_RESET(opts,
504 .uprobe_multi.flags = 1 << 31,
505 );
506
507 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
508 if (!ASSERT_ERR(link_fd, "link_fd"))
509 goto cleanup;
510 if (!ASSERT_EQ(link_fd, -EINVAL, "wrong_flags"))
511 goto cleanup;
512
513 /* wrong pid */
514 LIBBPF_OPTS_RESET(opts,
515 .uprobe_multi.path = path,
516 .uprobe_multi.offsets = (unsigned long *) &offset,
517 .uprobe_multi.cnt = 1,
518 .uprobe_multi.pid = -2,
519 );
520
521 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
522 if (!ASSERT_ERR(link_fd, "link_fd"))
523 goto cleanup;
524 ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
525
526 cleanup:
527 if (link_fd >= 0)
528 close(link_fd);
529 uprobe_multi__destroy(skel);
530 }
531
532 #ifdef __x86_64__
/* Function whose body is a bare int3, exposed via its own symbol. */
noinline void uprobe_multi_error_func(void)
{
	/*
	 * If --fcf-protection=branch is enabled the gcc generates endbr as
	 * first instruction, so marking the exact address of int3 with the
	 * symbol to be used in the attach_uprobe_fail_trap test below.
	 */
	asm volatile (
		".globl uprobe_multi_error_func_int3;	\n"
		"uprobe_multi_error_func_int3:		\n"
		"int3					\n"
	);
}
546
547 /*
548 * Attaching uprobe on uprobe_multi_error_func results in error
549 * because it already starts with int3 instruction.
550 */
/*
 * Attaching uprobe on uprobe_multi_error_func results in error
 * because it already starts with int3 instruction.
 */
static void attach_uprobe_fail_trap(struct uprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[4] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
		"uprobe_multi_error_func_int3",	/* this one must make attach fail */
	};

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);

	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
							      "/proc/self/exe", NULL, &opts);
	/* on unexpected success, clean up the link so cleanup stays sane */
	if (!ASSERT_ERR_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) {
		bpf_link__destroy(skel->links.uprobe);
		skel->links.uprobe = NULL;
	}
}
571 #else
/* non-x86_64: no int3 target available, so this check is a no-op */
static void attach_uprobe_fail_trap(struct uprobe_multi *skel) { }
573 #endif
574
/* Reference-counter (semaphore) objects resolved by name below. */
short sema_1 __used, sema_2 __used;
576
/*
 * Build a 3-entry attach where two entries share one function offset but
 * carry different ref_ctr_offsets — the kernel must reject that.
 */
static void attach_uprobe_fail_refctr(struct uprobe_multi *skel)
{
	unsigned long *tmp_offsets = NULL, *tmp_ref_ctr_offsets = NULL;
	unsigned long offsets[3], ref_ctr_offsets[3];
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *path = "/proc/self/exe";
	/* arrays sized 3 to match offsets[]; only 2 symbols are resolved */
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
	};
	const char *sema[3] = {
		"sema_1",
		"sema_2",
	};
	int prog_fd, link_fd, err;

	prog_fd = bpf_program__fd(skel->progs.uprobe_extra);

	err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &syms,
				       &tmp_offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
		return;

	err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &sema,
				       &tmp_ref_ctr_offsets, STT_OBJECT);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
		goto cleanup;

	/*
	 * We attach to 3 uprobes on 2 functions, so 2 uprobes share single function,
	 * but with different ref_ctr_offset which is not allowed and results in fail.
	 */
	offsets[0] = tmp_offsets[0]; /* uprobe_multi_func_1 */
	offsets[1] = tmp_offsets[1]; /* uprobe_multi_func_2 */
	offsets[2] = tmp_offsets[1]; /* uprobe_multi_func_2 */

	ref_ctr_offsets[0] = tmp_ref_ctr_offsets[0]; /* sema_1 */
	ref_ctr_offsets[1] = tmp_ref_ctr_offsets[1]; /* sema_2 */
	ref_ctr_offsets[2] = tmp_ref_ctr_offsets[0]; /* sema_1, error */

	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = (const unsigned long *) &offsets;
	opts.uprobe_multi.ref_ctr_offsets = (const unsigned long *) &ref_ctr_offsets;
	opts.uprobe_multi.cnt = 3;

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	/* on unexpected success, close the link fd */
	if (!ASSERT_ERR(link_fd, "link_fd"))
		close(link_fd);

cleanup:
	free(tmp_ref_ctr_offsets);
	free(tmp_offsets);
}
630
/* Run the two expected-to-fail attach scenarios against one skeleton. */
static void test_attach_uprobe_fails(void)
{
	struct uprobe_multi *skel;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		return;

	/* x86_64 only: attaching on top of a trap (int3) instruction fails */
	attach_uprobe_fail_trap(skel);

	/* conflicting ref_ctr_offset on a shared uprobe offset fails */
	attach_uprobe_fail_refctr(skel);

	uprobe_multi__destroy(skel);
}
647
/*
 * Attach the programs through the raw bpf_link_create() API (resolving
 * symbol offsets by hand) with an optional pid filter, then run the
 * common test body.
 */
static void __test_link_api(struct child *child)
{
	int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *path = "/proc/self/exe";
	struct uprobe_multi *skel = NULL;
	unsigned long *offsets = NULL;
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
	};
	int link_extra_fd = -1;
	int err;

	err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
		return;

	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = offsets;
	opts.uprobe_multi.cnt = ARRAY_SIZE(syms);
	opts.uprobe_multi.pid = child ? child->pid : 0;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	/*
	 * NOTE(review): kprobe_multi.flags appears to alias uprobe_multi.flags
	 * (both are the first member of the opts union) — confirm against the
	 * libbpf bpf_link_create_opts layout.
	 */
	opts.kprobe_multi.flags = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe);
	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
		goto cleanup;

	opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.uretprobe);
	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
		goto cleanup;

	opts.kprobe_multi.flags = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe_sleep);
	link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link3_fd, 0, "link3_fd"))
		goto cleanup;

	opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep);
	link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link4_fd, 0, "link4_fd"))
		goto cleanup;

	/* uprobe_extra is attached without any pid filter */
	opts.kprobe_multi.flags = 0;
	opts.uprobe_multi.pid = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
	link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd"))
		goto cleanup;

	uprobe_multi_test_run(skel, child);

cleanup:
	if (link1_fd >= 0)
		close(link1_fd);
	if (link2_fd >= 0)
		close(link2_fd);
	if (link3_fd >= 0)
		close(link3_fd);
	if (link4_fd >= 0)
		close(link4_fd);
	if (link_extra_fd >= 0)
		close(link_extra_fd);

	uprobe_multi__destroy(skel);
	free(offsets);
}
724
test_link_api(void)725 static void test_link_api(void)
726 {
727 static struct child child;
728
729 /* no pid filter */
730 __test_link_api(NULL);
731
732 /* pid filter */
733 if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
734 return;
735
736 __test_link_api(&child);
737
738 /* pid filter (thread) */
739 if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
740 return;
741
742 __test_link_api(&child);
743 }
744
745 static struct bpf_program *
get_program(struct uprobe_multi_consumers * skel,int prog)746 get_program(struct uprobe_multi_consumers *skel, int prog)
747 {
748 switch (prog) {
749 case 0:
750 return skel->progs.uprobe_0;
751 case 1:
752 return skel->progs.uprobe_1;
753 case 2:
754 return skel->progs.uprobe_2;
755 case 3:
756 return skel->progs.uprobe_3;
757 default:
758 ASSERT_FAIL("get_program");
759 return NULL;
760 }
761 }
762
763 static struct bpf_link **
get_link(struct uprobe_multi_consumers * skel,int link)764 get_link(struct uprobe_multi_consumers *skel, int link)
765 {
766 switch (link) {
767 case 0:
768 return &skel->links.uprobe_0;
769 case 1:
770 return &skel->links.uprobe_1;
771 case 2:
772 return &skel->links.uprobe_2;
773 case 3:
774 return &skel->links.uprobe_3;
775 default:
776 ASSERT_FAIL("get_link");
777 return NULL;
778 }
779 }
780
/*
 * Attach consumer 'idx' (0-3) to uprobe_consumer_test; 0/1 attach as
 * entry uprobes, 2/3 as return uprobes.  Returns 0 on success.
 */
static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx)
{
	struct bpf_program *prog = get_program(skel, idx);
	struct bpf_link **link = get_link(skel, idx);
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);

	if (!prog || !link)
		return -1;

	/*
	 * bit/prog: 0,1 uprobe entry
	 * bit/prog: 2,3 uprobe return
	 */
	opts.retprobe = idx == 2 || idx == 3;

	*link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe",
						 "uprobe_consumer_test",
						 &opts);
	if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi"))
		return -1;
	return 0;
}
803
/* Destroy the link of consumer 'idx' and clear its skeleton slot. */
static void uprobe_detach(struct uprobe_multi_consumers *skel, int idx)
{
	struct bpf_link **link = get_link(skel, idx);

	bpf_link__destroy(*link);
	*link = NULL;
}
811
/*
 * Return true iff bit 'bit' of 'val' is set.  Shifting the unsigned long
 * value right avoids the undefined behavior of the int-typed '1 << bit'
 * for bit >= 31, and makes any bit index valid for unsigned long work.
 */
static bool test_bit(int bit, unsigned long val)
{
	return (val >> bit) & 1;
}
816
/*
 * Probe target for the consumer test (must stay noinline so uprobes can
 * attach to it).  While "inside" the probed function it reshapes the set
 * of attached consumers from the 'before' bitmask to the 'after' one.
 */
noinline int
uprobe_consumer_test(struct uprobe_multi_consumers *skel,
		     unsigned long before, unsigned long after)
{
	int idx;

	/* detach uprobe for each unset programs in 'before' state ... */
	for (idx = 0; idx < 4; idx++) {
		if (test_bit(idx, before) && !test_bit(idx, after))
			uprobe_detach(skel, idx);
	}

	/* ... and attach all new programs in 'after' state */
	for (idx = 0; idx < 4; idx++) {
		if (!test_bit(idx, before) && test_bit(idx, after)) {
			if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_after"))
				return -1;
		}
	}
	return 0;
}
838
/*
 * Run one before/after consumer combination: attach the 'before' set,
 * let uprobe_consumer_test morph it into the 'after' set, then verify
 * each consumer's hit count matches the expectation.
 */
static void consumer_test(struct uprobe_multi_consumers *skel,
			  unsigned long before, unsigned long after)
{
	int err, idx;

	printf("consumer_test before %lu after %lu\n", before, after);

	/* 'before' is each, we attach uprobe for every set idx */
	for (idx = 0; idx < 4; idx++) {
		if (test_bit(idx, before)) {
			if (!ASSERT_OK(uprobe_attach(skel, idx), "uprobe_attach_before"))
				goto cleanup;
		}
	}

	err = uprobe_consumer_test(skel, before, after);
	if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
		goto cleanup;

	for (idx = 0; idx < 4; idx++) {
		const char *fmt = "BUG";
		__u64 val = 0;

		if (idx < 2) {
			/*
			 * uprobe entry
			 *   +1 if define in 'before'
			 */
			if (test_bit(idx, before))
				val++;
			fmt = "prog 0/1: uprobe";
		} else {
			/*
			 * uprobe return is tricky ;-)
			 *
			 * to trigger uretprobe consumer, the uretprobe needs to be installed,
			 * which means one of the 'return' uprobes was alive when probe was hit:
			 *
			 *   idxs: 2/3 uprobe return in 'installed' mask
			 *
			 * in addition if 'after' state removes everything that was installed in
			 * 'before' state, then uprobe kernel object goes away and return uprobe
			 * is not installed and we won't hit it even if it's in 'after' state.
			 */
			unsigned long had_uretprobes  = before & 0b1100; /* is uretprobe installed */
			unsigned long probe_preserved = before & after;  /* did uprobe go away */

			if (had_uretprobes && probe_preserved && test_bit(idx, after))
				val++;
			fmt = "idx 2/3: uretprobe";
		}

		ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt);
		skel->bss->uprobe_result[idx] = 0;
	}

cleanup:
	/* detach everything so the next combination starts clean */
	for (idx = 0; idx < 4; idx++)
		uprobe_detach(skel, idx);
}
899
/* Drive consumer_test over every 4-bit before/after combination. */
static void test_consumers(void)
{
	struct uprobe_multi_consumers *skel;
	int before, after;

	skel = uprobe_multi_consumers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load"))
		return;

	/*
	 * The idea of this test is to try all possible combinations of
	 * uprobes consumers attached on single function.
	 *
	 *  - 2 uprobe entry consumer
	 *  - 2 uprobe exit consumers
	 *
	 * The test uses 4 uprobes attached on single function, but that
	 * translates into single uprobe with 4 consumers in kernel.
	 *
	 * The before/after values present the state of attached consumers
	 * before and after the probed function:
	 *
	 *  bit/prog 0,1 : uprobe entry
	 *  bit/prog 2,3 : uprobe return
	 *
	 * For example for:
	 *
	 *   before = 0b0101
	 *   after  = 0b0110
	 *
	 * it means that before we call 'uprobe_consumer_test' we attach
	 * uprobes defined in 'before' value:
	 *
	 *   - bit/prog 0: uprobe entry
	 *   - bit/prog 2: uprobe return
	 *
	 * uprobe_consumer_test is called and inside it we attach and detach
	 * uprobes based on 'after' value:
	 *
	 *   - bit/prog 0: stays untouched
	 *   - bit/prog 2: uprobe return is detached
	 *
	 * uprobe_consumer_test returns and we check counters values increased
	 * by bpf programs on each uprobe to match the expected count based on
	 * before/after bits.
	 */

	for (before = 0; before < 16; before++) {
		for (after = 0; after < 16; after++)
			consumer_test(skel, before, after);
	}

	uprobe_multi_consumers__destroy(skel);
}
954
uprobe_multi_program(struct uprobe_multi_pid_filter * skel,int idx)955 static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx)
956 {
957 switch (idx) {
958 case 0: return skel->progs.uprobe_multi_0;
959 case 1: return skel->progs.uprobe_multi_1;
960 case 2: return skel->progs.uprobe_multi_2;
961 }
962 return NULL;
963 }
964
#define TASKS 3 /* number of concurrent children in the pid filter test */
966
/*
 * Spawn TASKS children, attach one pid-filtered (ret)uprobe per child,
 * kick them all and verify each probe fired exactly once for its own pid
 * and never for an unknown one.
 */
static void run_pid_filter(struct uprobe_multi_pid_filter *skel, bool clone_vm, bool retprobe)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .retprobe = retprobe);
	struct bpf_link *link[TASKS] = {};
	struct child child[TASKS] = {};
	int i;

	/* reset per-task counters from any previous run */
	memset(skel->bss->test, 0, sizeof(skel->bss->test));

	for (i = 0; i < TASKS; i++) {
		if (!ASSERT_OK(spawn_child_flag(&child[i], clone_vm), "spawn_child"))
			goto cleanup;
		skel->bss->pids[i] = child[i].pid;
	}

	for (i = 0; i < TASKS; i++) {
		link[i] = bpf_program__attach_uprobe_multi(uprobe_multi_program(skel, i),
							   child[i].pid, "/proc/self/exe",
							   "uprobe_multi_func_1", &opts);
		if (!ASSERT_OK_PTR(link[i], "bpf_program__attach_uprobe_multi"))
			goto cleanup;
	}

	for (i = 0; i < TASKS; i++)
		kick_child(&child[i]);

	for (i = 0; i < TASKS; i++) {
		ASSERT_EQ(skel->bss->test[i][0], 1, "pid");
		ASSERT_EQ(skel->bss->test[i][1], 0, "unknown");
	}

cleanup:
	for (i = 0; i < TASKS; i++)
		bpf_link__destroy(link[i]);
	for (i = 0; i < TASKS; i++)
		release_child(&child[i]);
}
1004
/* Run the pid filter test for entry and return probes on one skeleton. */
static void test_pid_filter_process(bool clone_vm)
{
	struct uprobe_multi_pid_filter *skel;

	skel = uprobe_multi_pid_filter__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_pid_filter__open_and_load"))
		return;

	run_pid_filter(skel, clone_vm, false);	/* entry uprobes */
	run_pid_filter(skel, clone_vm, true);	/* return uprobes */

	uprobe_multi_pid_filter__destroy(skel);
}
1018
/*
 * Benchmark attach/detach of the big uprobe_multi skeleton and verify
 * the external ./uprobe_multi binary triggers all 50000 probes.
 */
static void test_bench_attach_uprobe(void)
{
	long attach_start_ns = 0, attach_end_ns = 0;
	struct uprobe_multi_bench *skel = NULL;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;
	int err;

	skel = uprobe_multi_bench__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load"))
		goto cleanup;

	attach_start_ns = get_time_ns();

	err = uprobe_multi_bench__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi_bench__attach"))
		goto cleanup;

	attach_end_ns = get_time_ns();

	system("./uprobe_multi bench");

	ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");

cleanup:
	/* detach happens inside __destroy, so time the whole teardown */
	detach_start_ns = get_time_ns();
	uprobe_multi_bench__destroy(skel);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}
1054
/*
 * Benchmark attach/detach of the USDT skeleton and verify the external
 * ./uprobe_multi binary triggers all 50000 USDT probes.
 */
static void test_bench_attach_usdt(void)
{
	long attach_start_ns = 0, attach_end_ns = 0;
	struct uprobe_multi_usdt *skel = NULL;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;

	skel = uprobe_multi_usdt__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open"))
		goto cleanup;

	attach_start_ns = get_time_ns();

	skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi",
						     "test", "usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt"))
		goto cleanup;

	attach_end_ns = get_time_ns();

	system("./uprobe_multi usdt");

	ASSERT_EQ(skel->bss->count, 50000, "usdt_count");

cleanup:
	/* detach happens inside __destroy, so time the whole teardown */
	detach_start_ns = get_time_ns();
	uprobe_multi_usdt__destroy(skel);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}
1090
/* Top-level entry point: dispatch each subtest through the harness. */
void test_uprobe_multi_test(void)
{
	if (test__start_subtest("skel_api"))
		test_skel_api();
	if (test__start_subtest("attach_api_pattern"))
		test_attach_api_pattern();
	if (test__start_subtest("attach_api_syms"))
		test_attach_api_syms();
	if (test__start_subtest("link_api"))
		test_link_api();
	if (test__start_subtest("bench_uprobe"))
		test_bench_attach_uprobe();
	if (test__start_subtest("bench_usdt"))
		test_bench_attach_usdt();
	if (test__start_subtest("attach_api_fails"))
		test_attach_api_fails();
	if (test__start_subtest("attach_uprobe_fails"))
		test_attach_uprobe_fails();
	if (test__start_subtest("consumers"))
		test_consumers();
	if (test__start_subtest("filter_fork"))
		test_pid_filter_process(false);
	if (test__start_subtest("filter_clone_vm"))
		test_pid_filter_process(true);
}
1116