// SPDX-License-Identifier: GPL-2.0

#include <unistd.h>
#include <pthread.h>
#include <test_progs.h>
#include "uprobe_multi.skel.h"
#include "uprobe_multi_bench.skel.h"
#include "uprobe_multi_usdt.skel.h"
#include "uprobe_multi_consumers.skel.h"
#include "uprobe_multi_pid_filter.skel.h"
#include "uprobe_multi_session.skel.h"
#include "uprobe_multi_session_single.skel.h"
#include "uprobe_multi_session_cookie.skel.h"
#include "uprobe_multi_session_recursive.skel.h"
#include "uprobe_multi_verifier.skel.h"
#include "bpf/libbpf_internal.h"
#include "testing_helpers.h"
#include "../sdt.h"

static char test_data[] = "test_data";

noinline void uprobe_multi_func_1(void)
{
	asm volatile ("");
}

noinline void uprobe_multi_func_2(void)
{
	asm volatile ("");
}

noinline void uprobe_multi_func_3(void)
{
	asm volatile ("");
}

noinline void usdt_trigger(void)
{
	STAP_PROBE(test, pid_filter_usdt);
}

noinline void uprobe_session_recursive(int i)
{
	if (i)
		uprobe_session_recursive(i - 1);
}

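/*
 * State shared with the helper task that triggers the probed functions:
 * the 'go' pipe wakes the child up, the 'c2p' pipe signals readiness
 * back to the parent, and 'stack' backs children created via clone().
 */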
struct child {
	int go[2];
	int c2p[2]; /* child -> parent channel */
	int pid;
	int tid;
	pthread_t thread;
	char stack[65536];
};

static void release_child(struct child *child)
{
	int child_status;

	if (!child)
		return;
	close(child->go[1]);
	close(child->go[0]);
	if (child->thread)
		pthread_join(child->thread, NULL);
	close(child->c2p[0]);
	close(child->c2p[1]);
	if (child->pid > 0)
		waitpid(child->pid, &child_status, 0);
}

static void kick_child(struct child *child)
{
	char c = 1;

	if (child) {
		write(child->go[1], &c, 1);
		release_child(child);
	}
	fflush(NULL);
}
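
/*
 * Typical lifecycle (sketch): spawn_child()/spawn_thread() leaves the
 * child blocked on the 'go' pipe, the test attaches its probes, and
 * kick_child() releases the child so the trigger functions run exactly
 * once while the probes are live; kick_child() also reaps the child
 * via release_child().
 */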

static int child_func(void *arg)
{
	struct child *child = arg;
	int err, c;

	close(child->go[1]);

	/* wait for parent's kick */
	err = read(child->go[0], &c, 1);
	if (err != 1)
		exit(err);

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();
	usdt_trigger();

	exit(errno);
}

static int spawn_child_flag(struct child *child, bool clone_vm)
{
	/* pipe to notify child to execute the trigger functions */
	if (pipe(child->go))
		return -1;

	if (clone_vm) {
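		/*
		 * clone() requires a caller-supplied stack, so point it into
		 * the preallocated buffer (the stack grows down on the
		 * architectures this test runs on); CLONE_VM makes the child
		 * share our address space, which is what the filter_clone_vm
		 * subtest exercises.
		 */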
		child->pid = child->tid = clone(child_func, child->stack + sizeof(child->stack)/2,
						CLONE_VM|SIGCHLD, child);
	} else {
		child->pid = child->tid = fork();
	}
	if (child->pid < 0) {
		release_child(child);
		errno = EINVAL;
		return -1;
	}

	/* forked child */
	if (!clone_vm && child->pid == 0)
		child_func(child);

	return 0;
}

static int spawn_child(struct child *child)
{
	return spawn_child_flag(child, false);
}

static void *child_thread(void *ctx)
{
	struct child *child = ctx;
	int c = 0, err;

	child->tid = sys_gettid();

	/* let parent know we are ready */
	err = write(child->c2p[1], &c, 1);
	if (err != 1)
		pthread_exit(&err);

	/* wait for parent's kick */
	err = read(child->go[0], &c, 1);
	if (err != 1)
		pthread_exit(&err);

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();
	usdt_trigger();

	err = 0;
	pthread_exit(&err);
}

static int spawn_thread(struct child *child)
{
	int c, err;

	/* pipe to notify child to execute the trigger functions */
	if (pipe(child->go))
		return -1;
	/* pipe to notify parent that child thread is ready */
	if (pipe(child->c2p)) {
		close(child->go[0]);
		close(child->go[1]);
		return -1;
	}

	child->pid = getpid();

	err = pthread_create(&child->thread, NULL, child_thread, child);
	if (err) {
		err = -errno;
		close(child->go[0]);
		close(child->go[1]);
		close(child->c2p[0]);
		close(child->c2p[1]);
		errno = -err;
		return -1;
	}

	err = read(child->c2p[0], &c, 1);
	if (!ASSERT_EQ(err, 1, "child_thread_ready"))
		return -1;

	return 0;
}

static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
{
	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;

	skel->bss->user_ptr = test_data;

	/*
	 * Disable the pid check in the bpf programs when running the pid
	 * filter test: the probes are attached with child->pid, so the
	 * kernel itself must ensure they execute only for that task.
	 */
	skel->bss->pid = child ? 0 : getpid();
	skel->bss->expect_pid = child ? child->pid : 0;

	/* trigger all probes; if we are testing a child *process*, this also
	 * makes sure that PID filtering doesn't let through activations from
	 * the wrong PID; when we test a child *thread*, we skip this to
	 * avoid double counting the triggering events
	 */
	if (!child || !child->thread) {
		uprobe_multi_func_1();
		uprobe_multi_func_2();
		uprobe_multi_func_3();
		usdt_trigger();
	}

	if (child)
		kick_child(child);

	/*
	 * There are 2 entry and 2 exit probes attached to each
	 * uprobe_multi_func_[123] function, and each of the sleepable probe
	 * invocations (6 in total) increments uprobe_multi_sleep_result.
	 */
	ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result");

	ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");

	ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");

	if (child) {
		ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
		ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
	}
}

static void test_skel_api(void)
{
	struct uprobe_multi *skel = NULL;
	int err;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	err = uprobe_multi__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi__attach"))
		goto cleanup;

	uprobe_multi_test_run(skel, NULL);

cleanup:
	uprobe_multi__destroy(skel);
}

static void
__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts,
		  struct child *child)
{
	pid_t pid = child ? child->pid : -1;
	struct uprobe_multi *skel = NULL;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid,
							      binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = true;
	skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid,
								 binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid,
								    binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = true;
	skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep,
								       pid, binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	opts->retprobe = false;
	skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1,
								    binary, pattern, opts);
	if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
		goto cleanup;

	/* Attach (uprobe-backed) USDTs */
	skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
							"test", "pid_filter_usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
		goto cleanup;

	skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
							  "test", "pid_filter_usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
		goto cleanup;

	uprobe_multi_test_run(skel, child);

	ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
	if (child) {
		ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
		ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
	}
cleanup:
	uprobe_multi__destroy(skel);
}

static void
test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
{
	static struct child child;

	/* no pid filter */
	__test_attach_api(binary, pattern, opts, NULL);

	/* pid filter */
	if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
		return;

	__test_attach_api(binary, pattern, opts, &child);

	/* pid filter (thread) */
	if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
		return;

	__test_attach_api(binary, pattern, opts, &child);
}

static void test_attach_api_pattern(void)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);

	test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts);
	test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts);
}

static void test_attach_api_syms(void)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
	};

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	test_attach_api("/proc/self/exe", NULL, &opts);
}

static void test_attach_api_fails(void)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *path = "/proc/self/exe";
	struct uprobe_multi *skel = NULL;
	int prog_fd, link_fd = -1;
	unsigned long offset = 0;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

	prog_fd = bpf_program__fd(skel->progs.uprobe_extra);

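	/*
	 * With libbpf 1.0 semantics, bpf_link_create() returns a negative
	 * errno value on failure, so the checks below compare link_fd
	 * against -E2BIG, -EINVAL, etc. directly.
	 */
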
	/* abnormal cnt */
	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = &offset;
	opts.uprobe_multi.cnt = INT_MAX;
	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -E2BIG, "big cnt"))
		goto cleanup;

	/* cnt is 0 */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) &offset,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "cnt_is_zero"))
		goto cleanup;

	/* negative offset */
	offset = -1;
	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = (unsigned long *) &offset;
	opts.uprobe_multi.cnt = 1;

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "offset_is_negative"))
		goto cleanup;

	/* offsets is NULL */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "offsets_is_null"))
		goto cleanup;

	/* wrong offsets pointer */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) 1,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EFAULT, "offsets_is_wrong"))
		goto cleanup;

	/* path is NULL */
	offset = 1;
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "path_is_null"))
		goto cleanup;

	/* wrong path pointer */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = (const char *) 1,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EFAULT, "path_is_wrong"))
		goto cleanup;

	/* wrong path type */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = "/",
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EBADF, "path_is_wrong_type"))
		goto cleanup;

	/* wrong cookies pointer */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cookies = (__u64 *) 1ULL,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EFAULT, "cookies_is_wrong"))
		goto cleanup;

	/* wrong ref_ctr_offsets pointer */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cookies = (__u64 *) &offset,
		.uprobe_multi.ref_ctr_offsets = (unsigned long *) 1,
		.uprobe_multi.cnt = 1,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EFAULT, "ref_ctr_offsets_is_wrong"))
		goto cleanup;

	/* wrong flags */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.flags = 1 << 31,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	if (!ASSERT_EQ(link_fd, -EINVAL, "wrong_flags"))
		goto cleanup;

	/* wrong pid */
	LIBBPF_OPTS_RESET(opts,
		.uprobe_multi.path = path,
		.uprobe_multi.offsets = (unsigned long *) &offset,
		.uprobe_multi.cnt = 1,
		.uprobe_multi.pid = -2,
	);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		goto cleanup;
	ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");

cleanup:
	if (link_fd >= 0)
		close(link_fd);
	uprobe_multi__destroy(skel);
}

#ifdef __x86_64__
noinline void uprobe_multi_error_func(void)
{
	/*
	 * If -fcf-protection=branch is enabled, gcc generates an endbr as
	 * the first instruction, so mark the exact address of the int3 with
	 * a symbol to be used in the attach_uprobe_fail_trap test below.
	 */
	asm volatile (
		".globl uprobe_multi_error_func_int3;	\n"
		"uprobe_multi_error_func_int3:		\n"
		"int3					\n"
	);
}

/*
 * Attaching a uprobe to uprobe_multi_error_func results in an error
 * because the function already starts with an int3 instruction.
 */
static void attach_uprobe_fail_trap(struct uprobe_multi *skel)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	const char *syms[4] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
		"uprobe_multi_error_func_int3",
	};

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);

	skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
							      "/proc/self/exe", NULL, &opts);
	if (!ASSERT_ERR_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi")) {
		bpf_link__destroy(skel->links.uprobe);
		skel->links.uprobe = NULL;
	}
}
#else
static void attach_uprobe_fail_trap(struct uprobe_multi *skel) { }
#endif

short sema_1 __used, sema_2 __used;

static void attach_uprobe_fail_refctr(struct uprobe_multi *skel)
{
	unsigned long *tmp_offsets = NULL, *tmp_ref_ctr_offsets = NULL;
	unsigned long offsets[3], ref_ctr_offsets[3];
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *path = "/proc/self/exe";
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
	};
	const char *sema[3] = {
		"sema_1",
		"sema_2",
	};
	int prog_fd, link_fd, err;

	prog_fd = bpf_program__fd(skel->progs.uprobe_extra);

	err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &syms,
				       &tmp_offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
		return;

	err = elf_resolve_syms_offsets("/proc/self/exe", 2, (const char **) &sema,
				       &tmp_ref_ctr_offsets, STT_OBJECT);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_sema"))
		goto cleanup;

	/*
	 * We attach 3 uprobes on 2 functions, so 2 of the uprobes share a
	 * single function but use different ref_ctr_offsets, which is not
	 * allowed and makes the attach fail.
	 */
	offsets[0] = tmp_offsets[0]; /* uprobe_multi_func_1 */
	offsets[1] = tmp_offsets[1]; /* uprobe_multi_func_2 */
	offsets[2] = tmp_offsets[1]; /* uprobe_multi_func_2 */

	ref_ctr_offsets[0] = tmp_ref_ctr_offsets[0]; /* sema_1 */
	ref_ctr_offsets[1] = tmp_ref_ctr_offsets[1]; /* sema_2 */
	ref_ctr_offsets[2] = tmp_ref_ctr_offsets[0]; /* sema_1, error */

	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = (const unsigned long *) &offsets;
	opts.uprobe_multi.ref_ctr_offsets = (const unsigned long *) &ref_ctr_offsets;
	opts.uprobe_multi.cnt = 3;

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_ERR(link_fd, "link_fd"))
		close(link_fd);

cleanup:
	free(tmp_ref_ctr_offsets);
	free(tmp_offsets);
}

static void test_attach_uprobe_fails(void)
{
	struct uprobe_multi *skel = NULL;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		return;

	/* attach fails due to adding uprobe on trap instruction, x86_64 only */
	attach_uprobe_fail_trap(skel);

	/* attach fails due to a wrong ref_ctr_offset on one of the uprobes */
	attach_uprobe_fail_refctr(skel);

	uprobe_multi__destroy(skel);
}

static void __test_link_api(struct child *child)
{
	int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *path = "/proc/self/exe";
	struct uprobe_multi *skel = NULL;
	unsigned long *offsets = NULL;
	const char *syms[3] = {
		"uprobe_multi_func_1",
		"uprobe_multi_func_2",
		"uprobe_multi_func_3",
	};
	int link_extra_fd = -1;
	int err;

	err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets, STT_FUNC);
	if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
		return;

	opts.uprobe_multi.path = path;
	opts.uprobe_multi.offsets = offsets;
	opts.uprobe_multi.cnt = ARRAY_SIZE(syms);
	opts.uprobe_multi.pid = child ? child->pid : 0;

	skel = uprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
		goto cleanup;

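	/*
	 * Note: kprobe_multi and uprobe_multi are members of the same union
	 * inside bpf_link_create_opts and both start with a 'flags' field,
	 * so setting opts.kprobe_multi.flags below is equivalent to setting
	 * opts.uprobe_multi.flags.
	 */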
	opts.kprobe_multi.flags = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe);
	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
		goto cleanup;

	opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.uretprobe);
	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
		goto cleanup;

	opts.kprobe_multi.flags = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe_sleep);
	link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link3_fd, 0, "link3_fd"))
		goto cleanup;

	opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep);
	link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link4_fd, 0, "link4_fd"))
		goto cleanup;

	opts.kprobe_multi.flags = 0;
	opts.uprobe_multi.pid = 0;
	prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
	link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd"))
		goto cleanup;

	uprobe_multi_test_run(skel, child);

cleanup:
	if (link1_fd >= 0)
		close(link1_fd);
	if (link2_fd >= 0)
		close(link2_fd);
	if (link3_fd >= 0)
		close(link3_fd);
	if (link4_fd >= 0)
		close(link4_fd);
	if (link_extra_fd >= 0)
		close(link_extra_fd);

	uprobe_multi__destroy(skel);
	free(offsets);
}

static void test_link_api(void)
{
	static struct child child;

	/* no pid filter */
	__test_link_api(NULL);

	/* pid filter */
	if (!ASSERT_OK(spawn_child(&child), "spawn_child"))
		return;

	__test_link_api(&child);

	/* pid filter (thread) */
	if (!ASSERT_OK(spawn_thread(&child), "spawn_thread"))
		return;

	__test_link_api(&child);
}

static struct bpf_program *
get_program(struct uprobe_multi_consumers *skel, int prog)
{
	switch (prog) {
	case 0:
		return skel->progs.uprobe_0;
	case 1:
		return skel->progs.uprobe_1;
	case 2:
		return skel->progs.uprobe_2;
	case 3:
		return skel->progs.uprobe_3;
	default:
		ASSERT_FAIL("get_program");
		return NULL;
	}
}

static struct bpf_link **
get_link(struct uprobe_multi_consumers *skel, int link)
{
	switch (link) {
	case 0:
		return &skel->links.uprobe_0;
	case 1:
		return &skel->links.uprobe_1;
	case 2:
		return &skel->links.uprobe_2;
	case 3:
		return &skel->links.uprobe_3;
	default:
		ASSERT_FAIL("get_link");
		return NULL;
	}
}

static int uprobe_attach(struct uprobe_multi_consumers *skel, int idx, unsigned long offset)
{
	struct bpf_program *prog = get_program(skel, idx);
	struct bpf_link **link = get_link(skel, idx);
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);

	if (!prog || !link)
		return -1;

	opts.offsets = &offset;
	opts.cnt = 1;

	/*
	 * bit/prog: 0 uprobe entry
	 * bit/prog: 1 uprobe return
	 * bit/prog: 2 uprobe session with return
	 * bit/prog: 3 uprobe session without return
	 */
	opts.retprobe = idx == 1;
	opts.session  = idx == 2 || idx == 3;

	*link = bpf_program__attach_uprobe_multi(prog, 0, "/proc/self/exe", NULL, &opts);
	if (!ASSERT_OK_PTR(*link, "bpf_program__attach_uprobe_multi"))
		return -1;
	return 0;
}

static void uprobe_detach(struct uprobe_multi_consumers *skel, int idx)
{
	struct bpf_link **link = get_link(skel, idx);

	bpf_link__destroy(*link);
	*link = NULL;
}

static bool test_bit(int bit, unsigned long val)
{
	return val & (1 << bit);
}
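
/*
 * 'before' and 'after' are 4-bit masks over progs 0-3 (see uprobe_attach).
 * uprobe_consumer_test transitions the set of attached consumers from the
 * 'before' state to the 'after' state while the probed wrapper is still
 * executing, so pending uretprobe instances also get exercised.
 */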

noinline int
uprobe_consumer_test(struct uprobe_multi_consumers *skel,
		     unsigned long before, unsigned long after,
		     unsigned long offset)
{
	int idx;

	/* detach uprobes for progs set in 'before' but not in 'after' ... */
	for (idx = 0; idx < 4; idx++) {
		if (test_bit(idx, before) && !test_bit(idx, after))
			uprobe_detach(skel, idx);
	}

	/* ... and attach those set in 'after' but not in 'before' */
	for (idx = 0; idx < 4; idx++) {
		if (!test_bit(idx, before) && test_bit(idx, after)) {
			if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_after"))
				return -1;
		}
	}
	return 0;
}

/*
 * We generate 16 consumer_testX functions that uprobes get installed on
 * and that are called in separate threads. All the function pointers are
 * stored in the "consumers" section and each thread picks one function
 * based on its index.
 */

extern const void *__start_consumers;

#define __CONSUMER_TEST(func) 							\
noinline int func(struct uprobe_multi_consumers *skel, unsigned long before,	\
		  unsigned long after, unsigned long offset)			\
{										\
	return uprobe_consumer_test(skel, before, after, offset);		\
}										\
void *__ ## func __used __attribute__((section("consumers"))) = (void *) func;

#define CONSUMER_TEST(func) __CONSUMER_TEST(func)

#define C1  CONSUMER_TEST(__PASTE(consumer_test, __COUNTER__))
#define C4  C1 C1 C1 C1
#define C16 C4 C4 C4 C4

C16
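
/*
 * For illustration, a single C1 expands (roughly, with __COUNTER__ == 0) to:
 *
 *   noinline int consumer_test0(struct uprobe_multi_consumers *skel,
 *                               unsigned long before, unsigned long after,
 *                               unsigned long offset)
 *   {
 *           return uprobe_consumer_test(skel, before, after, offset);
 *   }
 *   void *__consumer_test0 __used
 *           __attribute__((section("consumers"))) = (void *) consumer_test0;
 */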

typedef int (*test_t)(struct uprobe_multi_consumers *, unsigned long,
		      unsigned long, unsigned long);

static int consumer_test(struct uprobe_multi_consumers *skel,
			 unsigned long before, unsigned long after,
			 test_t test, unsigned long offset)
{
	int err, idx, ret = -1;

	printf("consumer_test before %lu after %lu\n", before, after);

	/* attach the uprobes for every idx set in 'before' */
	for (idx = 0; idx < 4; idx++) {
		if (test_bit(idx, before)) {
			if (!ASSERT_OK(uprobe_attach(skel, idx, offset), "uprobe_attach_before"))
				goto cleanup;
		}
	}

	err = test(skel, before, after, offset);
	if (!ASSERT_EQ(err, 0, "uprobe_consumer_test"))
		goto cleanup;

	for (idx = 0; idx < 4; idx++) {
		bool uret_stays, uret_survives;
		const char *fmt = "BUG";
		__u64 val = 0;

		switch (idx) {
		case 0:
			/*
			 * uprobe entry
			 *   +1 if defined in 'before'
			 */
			if (test_bit(idx, before))
				val++;
			fmt = "prog 0: uprobe";
			break;
		case 1:
			/*
			 * To trigger the uretprobe consumer, the uretprobe under
			 * test either stayed attached from 'before' to 'after'
			 * (uret_stays + test_bit) or its uretprobe instance
			 * survived and a uretprobe is active in 'after'
			 * (uret_survives + test_bit).
			 */
			uret_stays = before & after & 0b0110;
			uret_survives = ((before & 0b0110) && (after & 0b0110) && (before & 0b1001));

			if ((uret_stays || uret_survives) && test_bit(idx, after))
				val++;
			fmt = "prog 1: uretprobe";
			break;
		case 2:
			/*
			 * session with return
			 *  +1 if defined in 'before'
			 *  +1 if defined in 'after'
			 */
			if (test_bit(idx, before)) {
				val++;
				if (test_bit(idx, after))
					val++;
			}
			fmt = "prog 2: session with return";
			break;
		case 3:
			/*
			 * session without return
			 *   +1 if defined in 'before'
			 */
			if (test_bit(idx, before))
				val++;
			fmt = "prog 3: session with NO return";
			break;
		}

		if (!ASSERT_EQ(skel->bss->uprobe_result[idx], val, fmt))
			goto cleanup;
		skel->bss->uprobe_result[idx] = 0;
	}

	ret = 0;

cleanup:
	for (idx = 0; idx < 4; idx++)
		uprobe_detach(skel, idx);
	return ret;
}

#define CONSUMER_MAX 16

/*
 * Each thread runs 1/16 of the load: it tests a single 'before' value
 * (its thread index) against the full range of 'after' values.
 */
static void *consumer_thread(void *arg)
{
	unsigned long idx = (unsigned long) arg;
	struct uprobe_multi_consumers *skel;
	unsigned long offset;
	const void *func;
	int after;

	skel = uprobe_multi_consumers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_consumers__open_and_load"))
		return NULL;

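	/* pick this thread's consumer_testX pointer from the "consumers" section */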
	func = *((&__start_consumers) + idx);

	offset = get_uprobe_offset(func);
	if (!ASSERT_GE(offset, 0, "uprobe_offset"))
		goto out;

	for (after = 0; after < CONSUMER_MAX; after++)
		if (consumer_test(skel, idx, after, func, offset))
			goto out;

out:
	uprobe_multi_consumers__destroy(skel);
	return NULL;
}

static void test_consumers(void)
{
	pthread_t pt[CONSUMER_MAX];
	unsigned long idx;
	int err;

	/*
	 * The idea of this test is to try all possible combinations of
	 * uprobe consumers attached to a single function:
	 *
	 *  - 1 uprobe entry consumer
	 *  - 1 uprobe exit consumer
	 *  - 1 uprobe session with return
	 *  - 1 uprobe session without return
	 *
	 * The test attaches 4 uprobes to a single function, which in the
	 * kernel translates into a single uprobe with 4 consumers.
	 *
	 * The before/after values represent the state of the attached
	 * consumers before and after the probed function is called:
	 *
	 *  bit/prog 0 : uprobe entry
	 *  bit/prog 1 : uprobe return
	 *
	 * For example, for:
	 *
	 *   before = 0b01
	 *   after  = 0b10
	 *
	 * before calling 'uprobe_consumer_test' we attach the uprobes
	 * defined in the 'before' value:
	 *
	 *   - bit/prog 0: uprobe entry
	 *
	 * then uprobe_consumer_test is called and inside it we attach and
	 * detach uprobes based on the 'after' value:
	 *
	 *   - bit/prog 0: is detached
	 *   - bit/prog 1: is attached
	 *
	 * when uprobe_consumer_test returns, we check that the counters
	 * the bpf programs increased on each uprobe match the expected
	 * counts based on the before/after bits.
	 */

	for (idx = 0; idx < CONSUMER_MAX; idx++) {
		err = pthread_create(&pt[idx], NULL, consumer_thread, (void *) idx);
		if (!ASSERT_OK(err, "pthread_create"))
			break;
	}

	while (idx)
		pthread_join(pt[--idx], NULL);
}

static struct bpf_program *uprobe_multi_program(struct uprobe_multi_pid_filter *skel, int idx)
{
	switch (idx) {
	case 0: return skel->progs.uprobe_multi_0;
	case 1: return skel->progs.uprobe_multi_1;
	case 2: return skel->progs.uprobe_multi_2;
	}
	return NULL;
}

#define TASKS 3

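/*
 * Spawn TASKS children (fork or clone(CLONE_VM)), attach one pid-filtered
 * uprobe per child to uprobe_multi_func_1 and check that each bpf program
 * fired only for its own child's pid (test[i][0]) and never for any other
 * task (test[i][1]).
 */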
static void run_pid_filter(struct uprobe_multi_pid_filter *skel, bool clone_vm, bool retprobe)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, .retprobe = retprobe);
	struct bpf_link *link[TASKS] = {};
	struct child child[TASKS] = {};
	int i;

	memset(skel->bss->test, 0, sizeof(skel->bss->test));

	for (i = 0; i < TASKS; i++) {
		if (!ASSERT_OK(spawn_child_flag(&child[i], clone_vm), "spawn_child"))
			goto cleanup;
		skel->bss->pids[i] = child[i].pid;
	}

	for (i = 0; i < TASKS; i++) {
		link[i] = bpf_program__attach_uprobe_multi(uprobe_multi_program(skel, i),
							   child[i].pid, "/proc/self/exe",
							   "uprobe_multi_func_1", &opts);
		if (!ASSERT_OK_PTR(link[i], "bpf_program__attach_uprobe_multi"))
			goto cleanup;
	}

	for (i = 0; i < TASKS; i++)
		kick_child(&child[i]);

	for (i = 0; i < TASKS; i++) {
		ASSERT_EQ(skel->bss->test[i][0], 1, "pid");
		ASSERT_EQ(skel->bss->test[i][1], 0, "unknown");
	}

cleanup:
	for (i = 0; i < TASKS; i++)
		bpf_link__destroy(link[i]);
	for (i = 0; i < TASKS; i++)
		release_child(&child[i]);
}

static void test_pid_filter_process(bool clone_vm)
{
	struct uprobe_multi_pid_filter *skel;

	skel = uprobe_multi_pid_filter__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_pid_filter__open_and_load"))
		return;

	run_pid_filter(skel, clone_vm, false);
	run_pid_filter(skel, clone_vm, true);

	uprobe_multi_pid_filter__destroy(skel);
}

static void test_session_skel_api(void)
{
	struct uprobe_multi_session *skel = NULL;
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct bpf_link *link = NULL;
	int err;

	skel = uprobe_multi_session__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_session__open_and_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	skel->bss->user_ptr = test_data;

	err = uprobe_multi_session__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi_session__attach"))
		goto cleanup;

	/* trigger all probes */
	skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
	skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
	skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;

	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();

	/*
	 * We expect 2 for uprobe_multi_func_2 because it runs both the entry
	 * and the return probe; uprobe_multi_func_[13] run just the entry
	 * probe. All expected numbers are doubled because we run an extra
	 * test for the sleepable session.
	 */
	ASSERT_EQ(skel->bss->uprobe_session_result[0], 2, "uprobe_multi_func_1_result");
	ASSERT_EQ(skel->bss->uprobe_session_result[1], 4, "uprobe_multi_func_2_result");
	ASSERT_EQ(skel->bss->uprobe_session_result[2], 2, "uprobe_multi_func_3_result");

	/* We expect an increase from 3 entry and 1 return session calls -> 4 */
	ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 4, "uprobe_multi_sleep_result");

cleanup:
	bpf_link__destroy(link);
	uprobe_multi_session__destroy(skel);
}

static void test_session_single_skel_api(void)
{
	struct uprobe_multi_session_single *skel = NULL;
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	int err;

	skel = uprobe_multi_session_single__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_single__open_and_load"))
		goto cleanup;

	skel->bss->pid = getpid();

	err = uprobe_multi_session_single__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi_session_single__attach"))
		goto cleanup;

	uprobe_multi_func_1();

	/*
	 * We expect consumers 0 and 2 to trigger just the entry handler
	 * (value 1) and consumer 1 to hit both handlers (value 2).
	 */
	ASSERT_EQ(skel->bss->uprobe_session_result[0], 1, "uprobe_session_result_0");
	ASSERT_EQ(skel->bss->uprobe_session_result[1], 2, "uprobe_session_result_1");
	ASSERT_EQ(skel->bss->uprobe_session_result[2], 1, "uprobe_session_result_2");

cleanup:
	uprobe_multi_session_single__destroy(skel);
}

static void test_session_cookie_skel_api(void)
{
	struct uprobe_multi_session_cookie *skel = NULL;
	int err;

	skel = uprobe_multi_session_cookie__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_cookie__open_and_load"))
		goto cleanup;

	skel->bss->pid = getpid();

	err = uprobe_multi_session_cookie__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi_session_cookie__attach"))
		goto cleanup;

	/* trigger all probes */
	uprobe_multi_func_1();
	uprobe_multi_func_2();
	uprobe_multi_func_3();

	ASSERT_EQ(skel->bss->test_uprobe_1_result, 1, "test_uprobe_1_result");
	ASSERT_EQ(skel->bss->test_uprobe_2_result, 2, "test_uprobe_2_result");
	ASSERT_EQ(skel->bss->test_uprobe_3_result, 3, "test_uprobe_3_result");

cleanup:
	uprobe_multi_session_cookie__destroy(skel);
}

static void test_session_recursive_skel_api(void)
{
	struct uprobe_multi_session_recursive *skel = NULL;
	int i, err;

	skel = uprobe_multi_session_recursive__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_session_recursive__open_and_load"))
		goto cleanup;

	skel->bss->pid = getpid();

	err = uprobe_multi_session_recursive__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi_session_recursive__attach"))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(skel->bss->test_uprobe_cookie_entry); i++)
		skel->bss->test_uprobe_cookie_entry[i] = i + 1;

	uprobe_session_recursive(5);

	/*
	 *                                         entry uprobe:
	 * uprobe_session_recursive(5) {             *cookie = 1, return 0
	 *   uprobe_session_recursive(4) {           *cookie = 2, return 1
	 *     uprobe_session_recursive(3) {         *cookie = 3, return 0
	 *       uprobe_session_recursive(2) {       *cookie = 4, return 1
	 *         uprobe_session_recursive(1) {     *cookie = 5, return 0
	 *           uprobe_session_recursive(0) {   *cookie = 6, return 1
	 *                                          return uprobe:
	 *           } i = 0                          not executed
	 *         } i = 1                            test_uprobe_cookie_return[0] = 5
	 *       } i = 2                              not executed
	 *     } i = 3                                test_uprobe_cookie_return[1] = 3
	 *   } i = 4                                  not executed
	 * } i = 5                                    test_uprobe_cookie_return[2] = 1
	 */

	ASSERT_EQ(skel->bss->idx_entry, 6, "idx_entry");
	ASSERT_EQ(skel->bss->idx_return, 3, "idx_return");

	ASSERT_EQ(skel->bss->test_uprobe_cookie_return[0], 5, "test_uprobe_cookie_return[0]");
	ASSERT_EQ(skel->bss->test_uprobe_cookie_return[1], 3, "test_uprobe_cookie_return[1]");
	ASSERT_EQ(skel->bss->test_uprobe_cookie_return[2], 1, "test_uprobe_cookie_return[2]");

cleanup:
	uprobe_multi_session_recursive__destroy(skel);
}

static void test_bench_attach_uprobe(void)
{
	long attach_start_ns = 0, attach_end_ns = 0;
	struct uprobe_multi_bench *skel = NULL;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;
	int err;

	skel = uprobe_multi_bench__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load"))
		goto cleanup;

	attach_start_ns = get_time_ns();

	err = uprobe_multi_bench__attach(skel);
	if (!ASSERT_OK(err, "uprobe_multi_bench__attach"))
		goto cleanup;

	attach_end_ns = get_time_ns();

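	/* the uprobe_multi helper binary triggers the probed function 50000 times */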
	system("./uprobe_multi bench");

	ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");

cleanup:
	detach_start_ns = get_time_ns();
	uprobe_multi_bench__destroy(skel);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}

static void test_bench_attach_usdt(void)
{
	long attach_start_ns = 0, attach_end_ns = 0;
	struct uprobe_multi_usdt *skel = NULL;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;

	skel = uprobe_multi_usdt__open_and_load();
	if (!ASSERT_OK_PTR(skel, "uprobe_multi_usdt__open_and_load"))
		goto cleanup;

	attach_start_ns = get_time_ns();

	skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi",
						     "test", "usdt", NULL);
	if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt"))
		goto cleanup;

	attach_end_ns = get_time_ns();

	system("./uprobe_multi usdt");

	ASSERT_EQ(skel->bss->count, 50000, "usdt_count");

cleanup:
	detach_start_ns = get_time_ns();
	uprobe_multi_usdt__destroy(skel);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}

void test_uprobe_multi_test(void)
{
	if (test__start_subtest("skel_api"))
		test_skel_api();
	if (test__start_subtest("attach_api_pattern"))
		test_attach_api_pattern();
	if (test__start_subtest("attach_api_syms"))
		test_attach_api_syms();
	if (test__start_subtest("link_api"))
		test_link_api();
	if (test__start_subtest("bench_uprobe"))
		test_bench_attach_uprobe();
	if (test__start_subtest("bench_usdt"))
		test_bench_attach_usdt();
	if (test__start_subtest("attach_api_fails"))
		test_attach_api_fails();
	if (test__start_subtest("attach_uprobe_fails"))
		test_attach_uprobe_fails();
	if (test__start_subtest("consumers"))
		test_consumers();
	if (test__start_subtest("filter_fork"))
		test_pid_filter_process(false);
	if (test__start_subtest("filter_clone_vm"))
		test_pid_filter_process(true);
	if (test__start_subtest("session"))
		test_session_skel_api();
	if (test__start_subtest("session_single"))
		test_session_single_skel_api();
	if (test__start_subtest("session_cookie"))
		test_session_cookie_skel_api();
	if (test__start_subtest("session_cookie_recursive"))
		test_session_recursive_skel_api();
	RUN_TESTS(uprobe_multi_verifier);
}