1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <unistd.h>
4 #include <pthread.h>
5 #include <test_progs.h>
6 #include "uprobe_multi.skel.h"
7 #include "uprobe_multi_bench.skel.h"
8 #include "uprobe_multi_usdt.skel.h"
9 #include "uprobe_multi_consumers.skel.h"
10 #include "uprobe_multi_pid_filter.skel.h"
11 #include "uprobe_multi_session.skel.h"
12 #include "uprobe_multi_session_single.skel.h"
13 #include "uprobe_multi_session_cookie.skel.h"
14 #include "uprobe_multi_session_recursive.skel.h"
15 #include "uprobe_multi_verifier.skel.h"
16 #include "bpf/libbpf_internal.h"
17 #include "testing_helpers.h"
18 #include "../sdt.h"
19
/* Buffer whose address is handed to the BPF side via bss->user_ptr. */
static char test_data[] = "test_data";
21
/* Trigger functions: uprobes are attached on these by name/pattern below. */
noinline void uprobe_multi_func_1(void)
{
	asm volatile ("");
}
26
/* Trigger function, see uprobe_multi_func_1() above. */
noinline void uprobe_multi_func_2(void)
{
	asm volatile ("");
}
31
/* Trigger function, see uprobe_multi_func_1() above. */
noinline void uprobe_multi_func_3(void)
{
	asm volatile ("");
}
36
/* Fires the test:pid_filter_usdt USDT probe. */
noinline void usdt_trigger(void)
{
	STAP_PROBE(test, pid_filter_usdt);
}
41
/* Recursion target for the session-recursive tests: calls itself i times. */
noinline void uprobe_session_recursive(int i)
{
	if (i)
		uprobe_session_recursive(i - 1);
}
47
/* State of a helper child (process or thread) that runs the triggers. */
struct child {
	int go[2];		/* parent -> child "start" signal */
	int c2p[2]; /* child -> parent channel */
	int pid;
	int tid;
	pthread_t thread;	/* set only for thread-based children */
	char stack[65536];	/* stack buffer for the clone()-based child */
};
56
/*
 * Reap a child created by spawn_child*()/spawn_thread().  Closing the go
 * pipe first unblocks a child still waiting in read(); then join a thread
 * child, close the c2p channel and waitpid() a forked/cloned child.
 * Safe to call with child == NULL.
 */
static void release_child(struct child *child)
{
	int child_status;

	if (!child)
		return;
	close(child->go[1]);
	close(child->go[0]);
	if (child->thread) {
		pthread_join(child->thread, NULL);
		child->thread = 0;
	}
	close(child->c2p[0]);
	close(child->c2p[1]);
	if (child->pid > 0)
		waitpid(child->pid, &child_status, 0);
}
74
/*
 * Tell the child to run its trigger functions (one byte down the go pipe)
 * and immediately reap it, so all triggers have completed on return.
 */
static void kick_child(struct child *child)
{
	char c = 1;

	if (child) {
		write(child->go[1], &c, 1);
		release_child(child);
	}
	fflush(NULL);
}
85
/*
 * Body of the forked/cloned child: wait for the parent's kick, run all
 * trigger functions, then exit.  The exit code is errno, which is expected
 * to still be 0 when the triggers ran cleanly.
 */
static int child_func(void *arg)
{
	struct child *child = arg;
	int err, c;

	/* keep only the read end of the "go" pipe */
	close(child->go[1]);

	/* wait for parent's kick */
	err = read(child->go[0], &c, 1);
	if (err != 1)
		exit(err);

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();
	usdt_trigger();

	exit(errno);
}
105
/*
 * Start a child process that will run the trigger functions on demand.
 * With clone_vm the child shares our address space via clone(CLONE_VM),
 * otherwise a plain fork() is used.  Returns 0 on success, -1 with errno
 * set on failure.
 */
static int spawn_child_flag(struct child *child, bool clone_vm)
{
	/* pipe to notify child to execute the trigger functions */
	if (pipe(child->go))
		return -1;

	if (clone_vm) {
		/* hand clone() the middle of the stack buffer; stack grows
		 * down — presumably half the buffer is deemed enough,
		 * TODO confirm
		 */
		child->pid = child->tid = clone(child_func, child->stack + sizeof(child->stack)/2,
						CLONE_VM|SIGCHLD, child);
	} else {
		child->pid = child->tid = fork();
	}
	if (child->pid < 0) {
		release_child(child);
		errno = EINVAL;
		return -1;
	}

	/* fork-ed child */
	if (!clone_vm && child->pid == 0)
		child_func(child);

	return 0;
}
130
/* Convenience wrapper: spawn a fork()-based child. */
static int spawn_child(struct child *child)
{
	return spawn_child_flag(child, false);
}
135
/*
 * Body of the thread-based child: publish our tid, signal readiness over
 * the c2p pipe, wait for the parent's kick, then run the triggers.
 */
static void *child_thread(void *ctx)
{
	struct child *child = ctx;
	int c = 0, err;

	child->tid = sys_gettid();

	/* let parent know we are ready */
	err = write(child->c2p[1], &c, 1);
	if (err != 1)
		pthread_exit(&err);

	/* wait for parent's kick */
	err = read(child->go[0], &c, 1);
	if (err != 1)
		pthread_exit(&err);

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();
	usdt_trigger();

	err = 0;
	pthread_exit(&err);
}
161
spawn_thread(struct child * child)162 static int spawn_thread(struct child *child)
163 {
164 int c, err;
165
166 /* pipe to notify child to execute the trigger functions */
167 if (pipe(child->go))
168 return -1;
169 /* pipe to notify parent that child thread is ready */
170 if (pipe(child->c2p)) {
171 close(child->go[0]);
172 close(child->go[1]);
173 return -1;
174 }
175
176 child->pid = getpid();
177
178 err = pthread_create(&child->thread, NULL, child_thread, child);
179 if (err) {
180 err = -errno;
181 close(child->go[0]);
182 close(child->go[1]);
183 close(child->c2p[0]);
184 close(child->c2p[1]);
185 errno = -err;
186 return -1;
187 }
188
189 err = read(child->c2p[0], &c, 1);
190 if (!ASSERT_EQ(err, 1, "child_thread_ready"))
191 return -1;
192
193 return 0;
194 }
195
/*
 * Run the trigger functions and verify the counters recorded by the
 * uprobe_multi BPF programs.  With child == NULL the probes fire in this
 * process; with a child, only the child's executions must be counted
 * (pid-filter testing).
 */
static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
{
	/* let the BPF side check it runs at the expected addresses */
	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;

	skel->bss->user_ptr = test_data;

	/*
	 * Disable pid check in bpf program if we are pid filter test,
	 * because the probe should be executed only by child->pid
	 * passed at the probe attach.
	 */
	skel->bss->pid = child ? 0 : getpid();
	skel->bss->expect_pid = child ? child->pid : 0;

	/* trigger all probes, if we are testing child *process*, just to make
	 * sure that PID filtering doesn't let through activations from wrong
	 * PIDs; when we test child *thread*, we don't want to do this to
	 * avoid double counting number of triggering events
	 */
	if (!child || !child->thread) {
		uprobe_multi_func_1();
		uprobe_multi_func_2();
		uprobe_multi_func_3();
		usdt_trigger();
	}

	if (child)
		kick_child(child);

	/*
	 * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123]
	 * function and each sleepable probe (6) increments uprobe_multi_sleep_result.
	 */
	ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");

	ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");

	if (child) {
		ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
		ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
	}
}
248
/* Attach everything via the skeleton auto-attach API and run the checks. */
static void test_skel_api(void)
{
	struct uprobe_multi *skel = NULL;
	int err;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	err = uprobe_multi__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi__attach"))
		goto cleanup;

	uprobe_multi_test_run(skel, NULL);

cleanup:
	uprobe_multi__destroy(skel);
}
267
/*
 * Attach all uprobe/uretprobe/USDT programs through the libbpf attach API
 * (bpf_program__attach_uprobe_multi/_usdt), pid-filtered on child when one
 * is given, then run the standard trigger/verify cycle.  The *_extra links
 * are attached with pid -1 (no filter) to verify filtering elsewhere.
 */
static void
__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts,
		  struct child *child)
{
	pid_t pid = child ? child->pid : -1;
	struct uprobe_multi *skel = NULL;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid,
							      binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = true;
	skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid,
								 binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid,
								    binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = true;
	skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep,
								       pid, binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1,
								    binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	/* Attach (uprobe-backed) USDTs */
	skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
							"test", "pid_filter_usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
		goto cleanup;

	skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
							  "test", "pid_filter_usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
		goto cleanup;

	uprobe_multi_test_run(skel, child);

	ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
	if (child) {
		ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
		ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
	}
cleanup:
	uprobe_multi__destroy(skel);
}
330
/*
 * Run __test_attach_api() three times: without pid filter, filtered on a
 * child process, and filtered on a child thread.  The child is reaped by
 * kick_child() inside the test run.
 */
static void
test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
{
	static struct child child;

	memset(&child, 0, sizeof(child));

	/* no pid filter */
	__test_attach_api(binary, pattern, opts, NULL);

	/* pid filter */
	if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
		return;

	__test_attach_api(binary, pattern, opts, &child);

	/* pid filter (thread) */
	if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
		return;

	__test_attach_api(binary, pattern, opts, &child);
}
353
/* Attach by glob pattern ('*' and '?' wildcards) over the trigger funcs. */
static void test_attach_api_pattern(void)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);

	test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts);
	test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts);
}
361
/* Attach by an explicit symbol list instead of a pattern. */
static void test_attach_api_syms(void)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
	};

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	test_attach_api("/proc/self/exe", NULL, &opts);
}
375
test_attach_api_fails(void)376 static void test_attach_api_fails(void)
377 {
378 LIBBPF_OPTS(bpf_link_create_opts, opts);
379 const char *path = "/proc/self/exe";
380 struct uprobe_multi *skel = NULL;
381 int prog_fd, link_fd = -1;
382 unsigned long offset = 0;
383
384 skel = uprobe_multi__open_and_load();
385 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
386 goto cleanup;
387
388 prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
389
390 /* abnormal cnt */
391 opts.uprobe_multi.path = path;
392 opts.uprobe_multi.offsets = &offset;
393 opts.uprobe_multi.cnt = INT_MAX;
394 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
395 if (!ASSERT_ERR(link_fd, "link_fd"))
396 goto cleanup;
397 if (!ASSERT_EQ(link_fd, -E2BIG, "big cnt"))
398 goto cleanup;
399
400 /* cnt is 0 */
401 LIBBPF_OPTS_RESET(opts,
402 .uprobe_multi.path = path,
403 .uprobe_multi.offsets = (unsigned long *) &offset,
404 );
405
406 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
407 if (!ASSERT_ERR(link_fd, "link_fd"))
408 goto cleanup;
409 if (!ASSERT_EQ(link_fd, -EINVAL, "cnt_is_zero"))
410 goto cleanup;
411
412 /* negative offset */
413 offset = -1;
414 opts.uprobe_multi.path = path;
415 opts.uprobe_multi.offsets = (unsigned long *) &offset;
416 opts.uprobe_multi.cnt = 1;
417
418 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
419 if (!ASSERT_ERR(link_fd, "link_fd"))
420 goto cleanup;
421 if (!ASSERT_EQ(link_fd, -EINVAL, "offset_is_negative"))
422 goto cleanup;
423
424 /* offsets is NULL */
425 LIBBPF_OPTS_RESET(opts,
426 .uprobe_multi.path = path,
427 .uprobe_multi.cnt = 1,
428 );
429
430 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
431 if (!ASSERT_ERR(link_fd, "link_fd"))
432 goto cleanup;
433 if (!ASSERT_EQ(link_fd, -EINVAL, "offsets_is_null"))
434 goto cleanup;
435
436 /* wrong offsets pointer */
437 LIBBPF_OPTS_RESET(opts,
438 .uprobe_multi.path = path,
439 .uprobe_multi.offsets = (unsigned long *) 1,
440 .uprobe_multi.cnt = 1,
441 );
442
443 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
444 if (!ASSERT_ERR(link_fd, "link_fd"))
445 goto cleanup;
446 if (!ASSERT_EQ(link_fd, -EFAULT, "offsets_is_wrong"))
447 goto cleanup;
448
449 /* path is NULL */
450 offset = 1;
451 LIBBPF_OPTS_RESET(opts,
452 .uprobe_multi.offsets = (unsigned long *) &offset,
453 .uprobe_multi.cnt = 1,
454 );
455
456 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
457 if (!ASSERT_ERR(link_fd, "link_fd"))
458 goto cleanup;
459 if (!ASSERT_EQ(link_fd, -EINVAL, "path_is_null"))
460 goto cleanup;
461
462 /* wrong path pointer */
463 LIBBPF_OPTS_RESET(opts,
464 .uprobe_multi.path = (const char *) 1,
465 .uprobe_multi.offsets = (unsigned long *) &offset,
466 .uprobe_multi.cnt = 1,
467 );
468
469 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
470 if (!ASSERT_ERR(link_fd, "link_fd"))
471 goto cleanup;
472 if (!ASSERT_EQ(link_fd, -EFAULT, "path_is_wrong"))
473 goto cleanup;
474
475 /* wrong path type */
476 LIBBPF_OPTS_RESET(opts,
477 .uprobe_multi.path = "/",
478 .uprobe_multi.offsets = (unsigned long *) &offset,
479 .uprobe_multi.cnt = 1,
480 );
481
482 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
483 if (!ASSERT_ERR(link_fd, "link_fd"))
484 goto cleanup;
485 if (!ASSERT_EQ(link_fd, -EBADF, "path_is_wrong_type"))
486 goto cleanup;
487
488 /* wrong cookies pointer */
489 LIBBPF_OPTS_RESET(opts,
490 .uprobe_multi.path = path,
491 .uprobe_multi.offsets = (unsigned long *) &offset,
492 .uprobe_multi.cookies = (__u64 *) 1ULL,
493 .uprobe_multi.cnt = 1,
494 );
495
496 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
497 if (!ASSERT_ERR(link_fd, "link_fd"))
498 goto cleanup;
499 if (!ASSERT_EQ(link_fd, -EFAULT, "cookies_is_wrong"))
500 goto cleanup;
501
502 /* wrong ref_ctr_offsets pointer */
503 LIBBPF_OPTS_RESET(opts,
504 .uprobe_multi.path = path,
505 .uprobe_multi.offsets = (unsigned long *) &offset,
506 .uprobe_multi.cookies = (__u64 *) &offset,
507 .uprobe_multi.ref_ctr_offsets = (unsigned long *) 1,
508 .uprobe_multi.cnt = 1,
509 );
510
511 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
512 if (!ASSERT_ERR(link_fd, "link_fd"))
513 goto cleanup;
514 if (!ASSERT_EQ(link_fd, -EFAULT, "ref_ctr_offsets_is_wrong"))
515 goto cleanup;
516
517 /* wrong flags */
518 LIBBPF_OPTS_RESET(opts,
519 .uprobe_multi.flags = 1 << 31,
520 );
521
522 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
523 if (!ASSERT_ERR(link_fd, "link_fd"))
524 goto cleanup;
525 if (!ASSERT_EQ(link_fd, -EINVAL, "wrong_flags"))
526 goto cleanup;
527
528 /* wrong pid */
529 LIBBPF_OPTS_RESET(opts,
530 .uprobe_multi.path = path,
531 .uprobe_multi.offsets = (unsigned long *) &offset,
532 .uprobe_multi.cnt = 1,
533 .uprobe_multi.pid = -2,
534 );
535
536 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
537 if (!ASSERT_ERR(link_fd, "link_fd"))
538 goto cleanup;
539 ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
540
541 cleanup:
542 if (link_fd >= 0)
543 close(link_fd);
544 uprobe_multi__destroy(skel);
545 }
546
547 #ifdef __x86_64__
/* Function whose body is a bare int3 trap, used as an invalid uprobe target. */
noinline void uprobe_multi_error_func(void)
{
	/*
	 * If --fcf-protection=branch is enabled the gcc generates endbr as
	 * first instruction, so marking the exact address of int3 with the
	 * symbol to be used in the attach_uprobe_fail_trap test below.
	 */
	asm volatile (
		".globl uprobe_multi_error_func_int3;	\n"
		"uprobe_multi_error_func_int3:		\n"
		"int3					\n"
	);
}
561
562 /*
563 * Attaching uprobe on uprobe_multi_error_func results in error
564 * because it already starts with int3 instruction.
565 */
/*
 * Expect attach to fail: one of the four symbols points at an int3
 * instruction, which the kernel refuses to probe.  If attach unexpectedly
 * succeeds, tear the link down so the skeleton stays consistent.
 */
static void attach_uprobe_fail_trap(struct uprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[4] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
		"uprobe_multi_error_func_int3",
	};

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);

	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
							      "/proc/self/exe", NULL, &opts);
	if (!ASSERT_ERR_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) {
		bpf_link__destroy(skel->links.uprobe);
		skel->links.uprobe = NULL;
	}
}
586 #else
/* Non-x86: the trap-instruction attach test is not applicable. */
static void attach_uprobe_fail_trap(struct uprobe_multi *skel) { }
588 #endif
589
/* Semaphore objects used as ref_ctr targets by attach_uprobe_fail_refctr(). */
short sema_1 __used, sema_2 __used;
591
attach_uprobe_fail_refctr(struct uprobe_multi * skel)592 static void attach_uprobe_fail_refctr(struct uprobe_multi *skel)
593 {
594 unsigned long *tmp_offsets = NULL, *tmp_ref_ctr_offsets = NULL;
595 unsigned long offsets[3], ref_ctr_offsets[3];
596 LIBBPF_OPTS(bpf_link_create_opts, opts);
597 const char *path = "/proc/self/exe";
598 const char *syms[3] = {
599 "uprobe_multi_func_1",
600 "uprobe_multi_func_2",
601 };
602 const char *sema[3] = {
603 "sema_1",
604 "sema_2",
605 };
606 int prog_fd, link_fd, err;
607
608 prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
609
610 err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &syms,
611 &tmp_offsets, STT_FUNC);
612 if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
613 return;
614
615 err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &sema,
616 &tmp_ref_ctr_offsets, STT_OBJECT);
617 if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
618 goto cleanup;
619
620 /*
621 * We attach to 3 uprobes on 2 functions, so 2 uprobes share single function,
622 * but with different ref_ctr_offset which is not allowed and results in fail.
623 */
624 offsets[0] = tmp_offsets[0]; /* uprobe_multi_func_1 */
625 offsets[1] = tmp_offsets[1]; /* uprobe_multi_func_2 */
626 offsets[2] = tmp_offsets[1]; /* uprobe_multi_func_2 */
627
628 ref_ctr_offsets[0] = tmp_ref_ctr_offsets[0]; /* sema_1 */
629 ref_ctr_offsets[1] = tmp_ref_ctr_offsets[1]; /* sema_2 */
630 ref_ctr_offsets[2] = tmp_ref_ctr_offsets[0]; /* sema_1, error */
631
632 opts.uprobe_multi.path = path;
633 opts.uprobe_multi.offsets = (const unsigned long *) &offsets;
634 opts.uprobe_multi.ref_ctr_offsets = (const unsigned long *) &ref_ctr_offsets;
635 opts.uprobe_multi.cnt = 3;
636
637 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
638 if (!ASSERT_ERR(link_fd, "link_fd"))
639 close(link_fd);
640
641 cleanup:
642 free(tmp_ref_ctr_offsets);
643 free(tmp_offsets);
644 }
645
/* Run the expected-to-fail attach scenarios against one loaded skeleton. */
static void test_attach_uprobe_fails(void)
{
	struct uprobe_multi *skel = NULL;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		return;

	/* attach fails due to adding uprobe on trap instruction, x86_64 only */
	attach_uprobe_fail_trap(skel);

	/* attach fail due to wrong ref_ctr_offs on one of the uprobes */
	attach_uprobe_fail_refctr(skel);

	uprobe_multi__destroy(skel);
}
662
__test_link_api(struct child * child)663 static void __test_link_api(struct child *child)
664 {
665 int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
666 LIBBPF_OPTS(bpf_link_create_opts, opts);
667 const char *path = "/proc/self/exe";
668 struct uprobe_multi *skel = NULL;
669 unsigned long *offsets = NULL;
670 const char *syms[3] = {
671 "uprobe_multi_func_1",
672 "uprobe_multi_func_2",
673 "uprobe_multi_func_3",
674 };
675 int link_extra_fd = -1;
676 int err;
677
678 err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets, STT_FUNC);
679 if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
680 return;
681
682 opts.uprobe_multi.path = path;
683 opts.uprobe_multi.offsets = offsets;
684 opts.uprobe_multi.cnt = ARRAY_SIZE(syms);
685 opts.uprobe_multi.pid = child ? child->pid : 0;
686
687 skel = uprobe_multi__open_and_load();
688 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
689 goto cleanup;
690
691 opts.kprobe_multi.flags = 0;
692 prog_fd = bpf_program__fd(skel->progs.uprobe);
693 link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
694 if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
695 goto cleanup;
696
697 opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
698 prog_fd = bpf_program__fd(skel->progs.uretprobe);
699 link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
700 if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
701 goto cleanup;
702
703 opts.kprobe_multi.flags = 0;
704 prog_fd = bpf_program__fd(skel->progs.uprobe_sleep);
705 link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
706 if (!ASSERT_GE(link3_fd, 0, "link3_fd"))
707 goto cleanup;
708
709 opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
710 prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep);
711 link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
712 if (!ASSERT_GE(link4_fd, 0, "link4_fd"))
713 goto cleanup;
714
715 opts.kprobe_multi.flags = 0;
716 opts.uprobe_multi.pid = 0;
717 prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
718 link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
719 if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd"))
720 goto cleanup;
721
722 uprobe_multi_test_run(skel, child);
723
724 cleanup:
725 if (link1_fd >= 0)
726 close(link1_fd);
727 if (link2_fd >= 0)
728 close(link2_fd);
729 if (link3_fd >= 0)
730 close(link3_fd);
731 if (link4_fd >= 0)
732 close(link4_fd);
733 if (link_extra_fd >= 0)
734 close(link_extra_fd);
735
736 uprobe_multi__destroy(skel);
737 free(offsets);
738 }
739
/*
 * Run __test_link_api() without pid filter, with a child process filter,
 * and with a child thread filter.
 */
static void test_link_api(void)
{
	static struct child child;

	/* no pid filter */
	__test_link_api(NULL);

	/* pid filter */
	if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
		return;

	__test_link_api(&child);

	/* pid filter (thread) */
	if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
		return;

	__test_link_api(&child);
}
759
760 static struct bpf_program *
get_program(struct uprobe_multi_consumers * skel,int prog)761 get_program(struct uprobe_multi_consumers *skel, int prog)
762 {
763 switch (prog) {
764 case 0:
765 return skel->progs.uprobe_0;
766 case 1:
767 return skel->progs.uprobe_1;
768 case 2:
769 return skel->progs.uprobe_2;
770 case 3:
771 return skel->progs.uprobe_3;
772 default:
773 ASSERT_FAIL("get_program");
774 return NULL;
775 }
776 }
777
778 static struct bpf_link **
get_link(struct uprobe_multi_consumers * skel,int link)779 get_link(struct uprobe_multi_consumers *skel, int link)
780 {
781 switch (link) {
782 case 0:
783 return &skel->links.uprobe_0;
784 case 1:
785 return &skel->links.uprobe_1;
786 case 2:
787 return &skel->links.uprobe_2;
788 case 3:
789 return &skel->links.uprobe_3;
790 default:
791 ASSERT_FAIL("get_link");
792 return NULL;
793 }
794 }
795
/*
 * Attach consumer program idx at the given file offset in our own binary.
 * Returns 0 on success, -1 on failure (also flagged via ASSERT).
 */
static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx, unsigned long offset)
{
	struct bpf_program *prog = get_program(skel, idx);
	struct bpf_link **link = get_link(skel, idx);
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);

	if (!prog || !link)
		return -1;

	opts.offsets = &offset;
	opts.cnt = 1;

	/*
	 * bit/prog: 0 uprobe entry
	 * bit/prog: 1 uprobe return
	 * bit/prog: 2 uprobe session without return
	 * bit/prog: 3 uprobe session with return
	 * NOTE(review): consumer_test() labels prog 2 as "session with
	 * return" and prog 3 as "session with NO return" — one of the two
	 * comments is inverted; verify against uprobe_multi_consumers.c.
	 */
	opts.retprobe = idx == 1;
	opts.session  = idx == 2 || idx == 3;

	*link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi"))
		return -1;
	return 0;
}
822
uprobe_detach(struct uprobe_multi_consumers * skel,int idx)823 static void uprobe_detach(struct uprobe_multi_consumers *skel, int idx)
824 {
825 struct bpf_link **link = get_link(skel, idx);
826
827 bpf_link__destroy(*link);
828 *link = NULL;
829 }
830
/* True iff bit number 'bit' is set in 'val'. */
static bool test_bit(int bit, unsigned long val)
{
	return (val >> bit) & 1UL;
}
835
/*
 * Transition the attached-consumer set from the 'before' bitmask to the
 * 'after' bitmask while sitting inside the probed function: detach probes
 * that drop out, attach probes that appear.  Returns 0 on success.
 */
noinline int
uprobe_consumer_test(struct uprobe_multi_consumers *skel,
		     unsigned long before, unsigned long after,
		     unsigned long offset)
{
	int idx;

	/* detach uprobe for each unset programs in 'before' state ... */
	for (idx = 0; idx < 4; idx++) {
		if (test_bit(idx, before) && !test_bit(idx, after))
			uprobe_detach(skel, idx);
	}

	/* ... and attach all new programs in 'after' state */
	for (idx = 0; idx < 4; idx++) {
		if (!test_bit(idx, before) && test_bit(idx, after)) {
			if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_after"))
				return -1;
		}
	}
	return 0;
}
858
859 /*
860 * We generate 16 consumer_testX functions that will have uprobe installed on
861 * and will be called in separate threads. All function pointer are stored in
862 * "consumers" section and each thread will pick one function based on index.
863 */
864
/* Linker-generated symbol marking the start of the "consumers" section;
 * consumer_thread() indexes the function pointers stored there.
 */
extern const void *__start_consumers;

/* Define a noinline wrapper around uprobe_consumer_test() and record its
 * address in the "consumers" section.
 */
#define __CONSUMER_TEST(func)						\
noinline int func(struct uprobe_multi_consumers *skel, unsigned long before, \
		  unsigned long after, unsigned long offset)		\
{									\
	return uprobe_consumer_test(skel, before, after, offset);	\
}									\
void *__ ## func __used __attribute__((section("consumers"))) = (void *) func;

#define CONSUMER_TEST(func) __CONSUMER_TEST(func)

/* Expand to 16 uniquely named consumer_testN wrappers, one per thread. */
#define C1 CONSUMER_TEST(__PASTE(consumer_test, __COUNTER__))
#define C4 C1 C1 C1 C1
#define C16 C4 C4 C4 C4

C16

/* Signature shared by uprobe_consumer_test() and the generated wrappers. */
typedef int (*test_t)(struct uprobe_multi_consumers *, unsigned long,
		      unsigned long, unsigned long);
885
/*
 * Attach the consumers in 'before', run 'test' (a consumer_testX wrapper,
 * itself the probed function) which flips the set to 'after', then check
 * each program's hit counter against the expected value derived from the
 * before/after bitmasks.  Returns 0 on success, -1 on mismatch/failure.
 */
static int consumer_test(struct uprobe_multi_consumers *skel,
			 unsigned long before, unsigned long after,
			 test_t test, unsigned long offset)
{
	int err, idx, ret = -1;

	printf("consumer_test before %lu after %lu\n", before, after);

	/* 'before' is each, we attach uprobe for every set idx */
	for (idx = 0; idx < 4; idx++) {
		if (test_bit(idx, before)) {
			if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_before"))
				goto cleanup;
		}
	}

	err = test(skel, before, after, offset);
	if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
		goto cleanup;

	for (idx = 0; idx < 4; idx++) {
		bool uret_stays, uret_survives;
		const char *fmt = "BUG";
		__u64 val = 0;

		switch (idx) {
		case 0:
			/*
			 * uprobe entry
			 *   +1 if define in 'before'
			 */
			if (test_bit(idx, before))
				val++;
			fmt = "prog 0: uprobe";
			break;
		case 1:
			/*
			 * To trigger uretprobe consumer, the uretprobe under test either stayed from
			 * before to after (uret_stays + test_bit) or uretprobe instance survived and
			 * we have uretprobe active in after (uret_survives + test_bit)
			 *
			 * 0b0110 = return-firing progs (bits 1,2); 0b1001 =
			 * the remaining progs (bits 0,3) that keep the uprobe
			 * instance alive across the call.
			 */
			uret_stays = before & after & 0b0110;
			uret_survives = ((before & 0b0110) && (after & 0b0110) && (before & 0b1001));

			if ((uret_stays || uret_survives) && test_bit(idx, after))
				val++;
			fmt = "prog 1: uretprobe";
			break;
		case 2:
			/*
			 * session with return
			 *  +1 if defined in 'before'
			 *  +1 if defined in 'after'
			 * NOTE(review): uprobe_attach() labels prog 2 as
			 * "session without return" — the two comments
			 * contradict; verify against the BPF side.
			 */
			if (test_bit(idx, before)) {
				val++;
				if (test_bit(idx, after))
					val++;
			}
			fmt = "prog 2: session with return";
			break;
		case 3:
			/*
			 * session without return
			 *   +1 if defined in 'before'
			 */
			if (test_bit(idx, before))
				val++;
			fmt = "prog 3: session with NO return";
			break;
		}

		if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt))
			goto cleanup;
		/* reset the counter for the next before/after combination */
		skel->bss->uprobe_result[idx] = 0;
	}

	ret = 0;

cleanup:
	for (idx = 0; idx < 4; idx++)
		uprobe_detach(skel, idx);
	return ret;
}
970
971 #define CONSUMER_MAX 16
972
973 /*
974 * Each thread runs 1/16 of the load by running test for single
975 * 'before' number (based on thread index) and full scale of
976 * 'after' numbers.
977 */
consumer_thread(void * arg)978 static void *consumer_thread(void *arg)
979 {
980 unsigned long idx = (unsigned long) arg;
981 struct uprobe_multi_consumers *skel;
982 unsigned long offset;
983 const void *func;
984 int after;
985
986 skel = uprobe_multi_consumers__open_and_load();
987 if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load"))
988 return NULL;
989
990 func = *((&__start_consumers) + idx);
991
992 offset = get_uprobe_offset(func);
993 if (!ASSERT_GE(offset, 0, "uprobe_offset"))
994 goto out;
995
996 for (after = 0; after < CONSUMER_MAX; after++)
997 if (consumer_test(skel, idx, after, func, offset))
998 goto out;
999
1000 out:
1001 uprobe_multi_consumers__destroy(skel);
1002 return NULL;
1003 }
1004
1005
/* Spawn CONSUMER_MAX threads, each covering one 'before' value, and join
 * however many were successfully created.
 */
static void test_consumers(void)
{
	pthread_t pt[CONSUMER_MAX];
	unsigned long idx;
	int err;

	/*
	 * The idea of this test is to try all possible combinations of
	 * uprobes consumers attached on single function.
	 *
	 *  - 1 uprobe entry consumer
	 *  - 1 uprobe exit consumer
	 *  - 1 uprobe session with return
	 *  - 1 uprobe session without return
	 *
	 * The test uses 4 uprobes attached on single function, but that
	 * translates into single uprobe with 4 consumers in kernel.
	 *
	 * The before/after values present the state of attached consumers
	 * before and after the probed function:
	 *
	 *  bit/prog 0 : uprobe entry
	 *  bit/prog 1 : uprobe return
	 *
	 * For example for:
	 *
	 *  before = 0b01
	 *  after  = 0b10
	 *
	 * it means that before we call 'uprobe_consumer_test' we attach
	 * uprobes defined in 'before' value:
	 *
	 *   - bit/prog 1: uprobe entry
	 *
	 * uprobe_consumer_test is called and inside it we attach and detach
	 * uprobes based on 'after' value:
	 *
	 *   - bit/prog 0: is detached
	 *   - bit/prog 1: is attached
	 *
	 * uprobe_consumer_test returns and we check counters values increased
	 * by bpf programs on each uprobe to match the expected count based on
	 * before/after bits.
	 */

	for (idx = 0; idx < CONSUMER_MAX; idx++) {
		err = pthread_create(&pt[idx], NULL, consumer_thread, (void *) idx);
		if (!ASSERT_OK(err, "pthread_create"))
			break;
	}

	/* join only the threads that were actually created */
	while (idx)
		pthread_join(pt[--idx], NULL);
}
1060
uprobe_multi_program(struct uprobe_multi_pid_filter * skel,int idx)1061 static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx)
1062 {
1063 switch (idx) {
1064 case 0: return skel->progs.uprobe_multi_0;
1065 case 1: return skel->progs.uprobe_multi_1;
1066 case 2: return skel->progs.uprobe_multi_2;
1067 }
1068 return NULL;
1069 }
1070
1071 #define TASKS 3
1072
/*
 * Attach one uprobe (or uretprobe when 'retprobe' is set) on
 * uprobe_multi_func_1 per child task, each pid-filtered to its own child,
 * kick the children and verify every bpf program counted only its own pid.
 */
static void run_pid_filter(struct uprobe_multi_pid_filter *skel, bool clone_vm, bool retprobe)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .retprobe = retprobe);
	struct bpf_link *link[TASKS] = {};
	struct child child[TASKS] = {};
	int i;

	/* Reset per-task hit counters left over from a previous run. */
	memset(skel->bss->test, 0, sizeof(skel->bss->test));

	/*
	 * Spawn TASKS children (shared VM or not depending on clone_vm — see
	 * spawn_child_flag()) and publish their pids for the bpf side.
	 */
	for (i = 0; i < TASKS; i++) {
		if (!ASSERT_OK(spawn_child_flag(&child[i], clone_vm), "spawn_child"))
			goto cleanup;
		skel->bss->pids[i] = child[i].pid;
	}

	/* Program i is attached filtered to child i's pid only. */
	for (i = 0; i < TASKS; i++) {
		link[i] = bpf_program__attach_uprobe_multi(uprobe_multi_program(skel, i),
							   child[i].pid, "/proc/self/exe",
							   "uprobe_multi_func_1", &opts);
		if (!ASSERT_OK_PTR(link[i], "bpf_program__attach_uprobe_multi"))
			goto cleanup;
	}

	/*
	 * Let each child run (it presumably triggers uprobe_multi_func_1 —
	 * child_func is defined earlier in this file); note kick_child()
	 * already releases the child when done.
	 */
	for (i = 0; i < TASKS; i++)
		kick_child(&child[i]);

	/* test[i][0]: hits for child i's own pid; test[i][1]: any other pid. */
	for (i = 0; i < TASKS; i++) {
		ASSERT_EQ(skel->bss->test[i][0], 1, "pid");
		ASSERT_EQ(skel->bss->test[i][1], 0, "unknown");
	}

cleanup:
	for (i = 0; i < TASKS; i++)
		bpf_link__destroy(link[i]);
	/*
	 * NOTE(review): children kicked above were already released inside
	 * kick_child(), so this second release_child() re-closes their (now
	 * stale) pipe fds and issues a second waitpid — appears to be a
	 * harmless no-op today, but confirm there is no fd-reuse race.
	 */
	for (i = 0; i < TASKS; i++)
		release_child(&child[i]);
}
1110
/* Run the pid-filter scenario for both entry and return probes. */
static void test_pid_filter_process(bool clone_vm)
{
	struct uprobe_multi_pid_filter *skel = uprobe_multi_pid_filter__open_and_load();

	if (!ASSERT_OK_PTR(skel, "uprobe_multi_pid_filter__open_and_load"))
		return;

	run_pid_filter(skel, clone_vm, false);	/* uprobe */
	run_pid_filter(skel, clone_vm, true);	/* uretprobe */

	uprobe_multi_pid_filter__destroy(skel);
}
1124
test_session_skel_api(void)1125 static void test_session_skel_api(void)
1126 {
1127 struct uprobe_multi_session *skel = NULL;
1128 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
1129 struct bpf_link *link = NULL;
1130 int err;
1131
1132 skel = uprobe_multi_session__open_and_load();
1133 if (!ASSERT_OK_PTR(skel, "uprobe_multi_session__open_and_load"))
1134 goto cleanup;
1135
1136 skel->bss->pid = getpid();
1137 skel->bss->user_ptr = test_data;
1138
1139 err = uprobe_multi_session__attach(skel);
1140 if (!ASSERT_OK(err, "uprobe_multi_session__attach"))
1141 goto cleanup;
1142
1143 /* trigger all probes */
1144 skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
1145 skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
1146 skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
1147
1148 uprobe_multi_func_1();
1149 uprobe_multi_func_2();
1150 uprobe_multi_func_3();
1151
1152 /*
1153 * We expect 2 for uprobe_multi_func_2 because it runs both entry/return probe,
1154 * uprobe_multi_func_[13] run just the entry probe. All expected numbers are
1155 * doubled, because we run extra test for sleepable session.
1156 */
1157 ASSERT_EQ(skel->bss->uprobe_session_result[0], 2, "uprobe_multi_func_1_result");
1158 ASSERT_EQ(skel->bss->uprobe_session_result[1], 4, "uprobe_multi_func_2_result");
1159 ASSERT_EQ(skel->bss->uprobe_session_result[2], 2, "uprobe_multi_func_3_result");
1160
1161 /* We expect increase in 3 entry and 1 return session calls -> 4 */
1162 ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 4, "uprobe_multi_sleep_result");
1163
1164 cleanup:
1165 bpf_link__destroy(link);
1166 uprobe_multi_session__destroy(skel);
1167 }
1168
test_session_single_skel_api(void)1169 static void test_session_single_skel_api(void)
1170 {
1171 struct uprobe_multi_session_single *skel = NULL;
1172 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
1173 int err;
1174
1175 skel = uprobe_multi_session_single__open_and_load();
1176 if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_single__open_and_load"))
1177 goto cleanup;
1178
1179 skel->bss->pid = getpid();
1180
1181 err = uprobe_multi_session_single__attach(skel);
1182 if (!ASSERT_OK(err, "uprobe_multi_session_single__attach"))
1183 goto cleanup;
1184
1185 uprobe_multi_func_1();
1186
1187 /*
1188 * We expect consumer 0 and 2 to trigger just entry handler (value 1)
1189 * and consumer 1 to hit both (value 2).
1190 */
1191 ASSERT_EQ(skel->bss->uprobe_session_result[0], 1, "uprobe_session_result_0");
1192 ASSERT_EQ(skel->bss->uprobe_session_result[1], 2, "uprobe_session_result_1");
1193 ASSERT_EQ(skel->bss->uprobe_session_result[2], 1, "uprobe_session_result_2");
1194
1195 cleanup:
1196 uprobe_multi_session_single__destroy(skel);
1197 }
1198
test_session_cookie_skel_api(void)1199 static void test_session_cookie_skel_api(void)
1200 {
1201 struct uprobe_multi_session_cookie *skel = NULL;
1202 int err;
1203
1204 skel = uprobe_multi_session_cookie__open_and_load();
1205 if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_cookie__open_and_load"))
1206 goto cleanup;
1207
1208 skel->bss->pid = getpid();
1209
1210 err = uprobe_multi_session_cookie__attach(skel);
1211 if (!ASSERT_OK(err, "uprobe_multi_session_cookie__attach"))
1212 goto cleanup;
1213
1214 /* trigger all probes */
1215 uprobe_multi_func_1();
1216 uprobe_multi_func_2();
1217 uprobe_multi_func_3();
1218
1219 ASSERT_EQ(skel->bss->test_uprobe_1_result, 1, "test_uprobe_1_result");
1220 ASSERT_EQ(skel->bss->test_uprobe_2_result, 2, "test_uprobe_2_result");
1221 ASSERT_EQ(skel->bss->test_uprobe_3_result, 3, "test_uprobe_3_result");
1222
1223 cleanup:
1224 uprobe_multi_session_cookie__destroy(skel);
1225 }
1226
test_session_recursive_skel_api(void)1227 static void test_session_recursive_skel_api(void)
1228 {
1229 struct uprobe_multi_session_recursive *skel = NULL;
1230 int i, err;
1231
1232 skel = uprobe_multi_session_recursive__open_and_load();
1233 if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_recursive__open_and_load"))
1234 goto cleanup;
1235
1236 skel->bss->pid = getpid();
1237
1238 err = uprobe_multi_session_recursive__attach(skel);
1239 if (!ASSERT_OK(err, "uprobe_multi_session_recursive__attach"))
1240 goto cleanup;
1241
1242 for (i = 0; i < ARRAY_SIZE(skel->bss->test_uprobe_cookie_entry); i++)
1243 skel->bss->test_uprobe_cookie_entry[i] = i + 1;
1244
1245 uprobe_session_recursive(5);
1246
1247 /*
1248 * entry uprobe:
1249 * uprobe_session_recursive(5) { *cookie = 1, return 0
1250 * uprobe_session_recursive(4) { *cookie = 2, return 1
1251 * uprobe_session_recursive(3) { *cookie = 3, return 0
1252 * uprobe_session_recursive(2) { *cookie = 4, return 1
1253 * uprobe_session_recursive(1) { *cookie = 5, return 0
1254 * uprobe_session_recursive(0) { *cookie = 6, return 1
1255 * return uprobe:
1256 * } i = 0 not executed
1257 * } i = 1 test_uprobe_cookie_return[0] = 5
1258 * } i = 2 not executed
1259 * } i = 3 test_uprobe_cookie_return[1] = 3
1260 * } i = 4 not executed
1261 * } i = 5 test_uprobe_cookie_return[2] = 1
1262 */
1263
1264 ASSERT_EQ(skel->bss->idx_entry, 6, "idx_entry");
1265 ASSERT_EQ(skel->bss->idx_return, 3, "idx_return");
1266
1267 ASSERT_EQ(skel->bss->test_uprobe_cookie_return[0], 5, "test_uprobe_cookie_return[0]");
1268 ASSERT_EQ(skel->bss->test_uprobe_cookie_return[1], 3, "test_uprobe_cookie_return[1]");
1269 ASSERT_EQ(skel->bss->test_uprobe_cookie_return[2], 1, "test_uprobe_cookie_return[2]");
1270
1271 cleanup:
1272 uprobe_multi_session_recursive__destroy(skel);
1273 }
1274
test_bench_attach_uprobe(void)1275 static void test_bench_attach_uprobe(void)
1276 {
1277 long attach_start_ns = 0, attach_end_ns = 0;
1278 struct uprobe_multi_bench *skel = NULL;
1279 long detach_start_ns, detach_end_ns;
1280 double attach_delta, detach_delta;
1281 int err;
1282
1283 skel = uprobe_multi_bench__open_and_load();
1284 if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load"))
1285 goto cleanup;
1286
1287 attach_start_ns = get_time_ns();
1288
1289 err = uprobe_multi_bench__attach(skel);
1290 if (!ASSERT_OK(err, "uprobe_multi_bench__attach"))
1291 goto cleanup;
1292
1293 attach_end_ns = get_time_ns();
1294
1295 system("./uprobe_multi bench");
1296
1297 ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");
1298
1299 cleanup:
1300 detach_start_ns = get_time_ns();
1301 uprobe_multi_bench__destroy(skel);
1302 detach_end_ns = get_time_ns();
1303
1304 attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
1305 detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
1306
1307 printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
1308 printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
1309 }
1310
test_bench_attach_usdt(void)1311 static void test_bench_attach_usdt(void)
1312 {
1313 long attach_start_ns = 0, attach_end_ns = 0;
1314 struct uprobe_multi_usdt *skel = NULL;
1315 long detach_start_ns, detach_end_ns;
1316 double attach_delta, detach_delta;
1317
1318 skel = uprobe_multi_usdt__open_and_load();
1319 if (!ASSERT_OK_PTR(skel, "uprobe_multi__open"))
1320 goto cleanup;
1321
1322 attach_start_ns = get_time_ns();
1323
1324 skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi",
1325 "test", "usdt", NULL);
1326 if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt"))
1327 goto cleanup;
1328
1329 attach_end_ns = get_time_ns();
1330
1331 system("./uprobe_multi usdt");
1332
1333 ASSERT_EQ(skel->bss->count, 50000, "usdt_count");
1334
1335 cleanup:
1336 detach_start_ns = get_time_ns();
1337 uprobe_multi_usdt__destroy(skel);
1338 detach_end_ns = get_time_ns();
1339
1340 attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
1341 detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
1342
1343 printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
1344 printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
1345 }
1346
test_uprobe_multi_test(void)1347 void test_uprobe_multi_test(void)
1348 {
1349 if (test__start_subtest("skel_api"))
1350 test_skel_api();
1351 if (test__start_subtest("attach_api_pattern"))
1352 test_attach_api_pattern();
1353 if (test__start_subtest("attach_api_syms"))
1354 test_attach_api_syms();
1355 if (test__start_subtest("link_api"))
1356 test_link_api();
1357 if (test__start_subtest("bench_uprobe"))
1358 test_bench_attach_uprobe();
1359 if (test__start_subtest("bench_usdt"))
1360 test_bench_attach_usdt();
1361 if (test__start_subtest("attach_api_fails"))
1362 test_attach_api_fails();
1363 if (test__start_subtest("attach_uprobe_fails"))
1364 test_attach_uprobe_fails();
1365 if (test__start_subtest("consumers"))
1366 test_consumers();
1367 if (test__start_subtest("filter_fork"))
1368 test_pid_filter_process(false);
1369 if (test__start_subtest("filter_clone_vm"))
1370 test_pid_filter_process(true);
1371 if (test__start_subtest("session"))
1372 test_session_skel_api();
1373 if (test__start_subtest("session_single"))
1374 test_session_single_skel_api();
1375 if (test__start_subtest("session_cookie"))
1376 test_session_cookie_skel_api();
1377 if (test__start_subtest("session_cookie_recursive"))
1378 test_session_recursive_skel_api();
1379 RUN_TESTS(uprobe_multi_verifier);
1380 }
1381