// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "kprobe_multi.skel.h"
#include "trace_helpers.h"
#include "kprobe_multi_empty.skel.h"
#include "kprobe_multi_override.skel.h"
#include "kprobe_multi_session.skel.h"
#include "kprobe_multi_session_cookie.skel.h"
#include "bpf/libbpf_internal.h"
#include "bpf/hashmap.h"

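/* Run the trigger program and verify that every entry kprobe handler
 * fired; when test_return is set, also verify the return probes.
 */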
static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs.trigger);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");

	ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result");
	ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result");
	ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result");
	ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result");
	ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result");
	ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result");
	ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result");
	ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result");

	if (test_return) {
		ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result");
		ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result");
		ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result");
		ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result");
		ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result");
		ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result");
		ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result");
		ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result");
	}
}

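/* Attach all programs through the skeleton's generated attach helper
 * and check both entry and return probe results.
 */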
static void test_skel_api(void)
{
	struct kprobe_multi *skel = NULL;
	int err;

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	err = kprobe_multi__attach(skel);
	if (!ASSERT_OK(err, "kprobe_multi__attach"))
		goto cleanup;

	kprobe_multi_test_run(skel, true);

cleanup:
	kprobe_multi__destroy(skel);
}

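/* Attach through the low-level bpf_link_create() API with the
 * BPF_TRACE_KPROBE_MULTI attach type; the caller supplies either
 * addrs or syms in opts.
 */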
static void test_link_api(struct bpf_link_create_opts *opts)
{
	int prog_fd, link1_fd = -1, link2_fd = -1;
	struct kprobe_multi *skel = NULL;

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	prog_fd = bpf_program__fd(skel->progs.test_kprobe);
	link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
	if (!ASSERT_GE(link1_fd, 0, "link_fd"))
		goto cleanup;

	opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
	prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
	link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts);
	if (!ASSERT_GE(link2_fd, 0, "link_fd"))
		goto cleanup;

	kprobe_multi_test_run(skel, true);

cleanup:
	if (link1_fd != -1)
		close(link1_fd);
	if (link2_fd != -1)
		close(link2_fd);
	kprobe_multi__destroy(skel);
}

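/* Resolve a kernel symbol address from the kallsyms cache; the macro
 * returns from the calling test function on lookup failure.
 */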
#define GET_ADDR(__sym, __addr) ({					\
	__addr = ksym_get_addr(__sym);					\
	if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym))	\
		return;							\
})

static void test_link_api_addrs(void)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	unsigned long long addrs[8];

	GET_ADDR("bpf_fentry_test1", addrs[0]);
	GET_ADDR("bpf_fentry_test2", addrs[1]);
	GET_ADDR("bpf_fentry_test3", addrs[2]);
	GET_ADDR("bpf_fentry_test4", addrs[3]);
	GET_ADDR("bpf_fentry_test5", addrs[4]);
	GET_ADDR("bpf_fentry_test6", addrs[5]);
	GET_ADDR("bpf_fentry_test7", addrs[6]);
	GET_ADDR("bpf_fentry_test8", addrs[7]);

	opts.kprobe_multi.addrs = (const unsigned long *) addrs;
	opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
	test_link_api(&opts);
}

static void test_link_api_syms(void)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	const char *syms[8] = {
		"bpf_fentry_test1",
		"bpf_fentry_test2",
		"bpf_fentry_test3",
		"bpf_fentry_test4",
		"bpf_fentry_test5",
		"bpf_fentry_test6",
		"bpf_fentry_test7",
		"bpf_fentry_test8",
	};

	opts.kprobe_multi.syms = syms;
	opts.kprobe_multi.cnt = ARRAY_SIZE(syms);
	test_link_api(&opts);
}

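/* Attach through the libbpf bpf_program__attach_kprobe_multi_opts()
 * helper, either with a glob pattern or with explicit opts; return
 * probes are attached only when opts are provided.
 */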
static void
test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts)
{
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct kprobe_multi *skel = NULL;

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();
	link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
						      pattern, opts);
	if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	if (opts) {
		opts->retprobe = true;
		link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe_manual,
							      pattern, opts);
		if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
			goto cleanup;
	}

	kprobe_multi_test_run(skel, !!opts);

cleanup:
	bpf_link__destroy(link2);
	bpf_link__destroy(link1);
	kprobe_multi__destroy(skel);
}

static void test_attach_api_pattern(void)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);

	test_attach_api("bpf_fentry_test*", &opts);
	test_attach_api("bpf_fentry_test?", NULL);
}

static void test_attach_api_addrs(void)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	unsigned long long addrs[8];

	GET_ADDR("bpf_fentry_test1", addrs[0]);
	GET_ADDR("bpf_fentry_test2", addrs[1]);
	GET_ADDR("bpf_fentry_test3", addrs[2]);
	GET_ADDR("bpf_fentry_test4", addrs[3]);
	GET_ADDR("bpf_fentry_test5", addrs[4]);
	GET_ADDR("bpf_fentry_test6", addrs[5]);
	GET_ADDR("bpf_fentry_test7", addrs[6]);
	GET_ADDR("bpf_fentry_test8", addrs[7]);

	opts.addrs = (const unsigned long *) addrs;
	opts.cnt = ARRAY_SIZE(addrs);
	test_attach_api(NULL, &opts);
}

static void test_attach_api_syms(void)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	const char *syms[8] = {
		"bpf_fentry_test1",
		"bpf_fentry_test2",
		"bpf_fentry_test3",
		"bpf_fentry_test4",
		"bpf_fentry_test5",
		"bpf_fentry_test6",
		"bpf_fentry_test7",
		"bpf_fentry_test8",
	};

	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	test_attach_api(NULL, &opts);
}

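/* Exercise invalid option combinations and verify that attach fails
 * with the expected errno (-EINVAL, or -E2BIG for an oversized cnt).
 */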
static void test_attach_api_fails(void)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct kprobe_multi *skel = NULL;
	struct bpf_link *link = NULL;
	unsigned long long addrs[2];
	const char *syms[2] = {
		"bpf_fentry_test1",
		"bpf_fentry_test2",
	};
	__u64 cookies[2];
	int saved_error;

	addrs[0] = ksym_get_addr("bpf_fentry_test1");
	addrs[1] = ksym_get_addr("bpf_fentry_test2");

	if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr"))
		goto cleanup;

	skel = kprobe_multi__open_and_load();
	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
		goto cleanup;

	skel->bss->pid = getpid();

	/* fail_1 - pattern and opts NULL */
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
						     NULL, NULL);
	saved_error = -errno;
	if (!ASSERT_ERR_PTR(link, "fail_1"))
		goto cleanup;

	if (!ASSERT_EQ(saved_error, -EINVAL, "fail_1_error"))
		goto cleanup;

	/* fail_2 - both addrs and syms set */
	opts.addrs = (const unsigned long *) addrs;
	opts.syms = syms;
	opts.cnt = ARRAY_SIZE(syms);
	opts.cookies = NULL;

	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
						     NULL, &opts);
	saved_error = -errno;
	if (!ASSERT_ERR_PTR(link, "fail_2"))
		goto cleanup;

	if (!ASSERT_EQ(saved_error, -EINVAL, "fail_2_error"))
		goto cleanup;

	/* fail_3 - pattern and addrs set */
	opts.addrs = (const unsigned long *) addrs;
	opts.syms = NULL;
	opts.cnt = ARRAY_SIZE(syms);
	opts.cookies = NULL;

	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
						     "ksys_*", &opts);
	saved_error = -errno;
	if (!ASSERT_ERR_PTR(link, "fail_3"))
		goto cleanup;

	if (!ASSERT_EQ(saved_error, -EINVAL, "fail_3_error"))
		goto cleanup;

	/* fail_4 - pattern and cnt set */
	opts.addrs = NULL;
	opts.syms = NULL;
	opts.cnt = ARRAY_SIZE(syms);
	opts.cookies = NULL;

	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
						     "ksys_*", &opts);
	saved_error = -errno;
	if (!ASSERT_ERR_PTR(link, "fail_4"))
		goto cleanup;

	if (!ASSERT_EQ(saved_error, -EINVAL, "fail_4_error"))
		goto cleanup;

	/* fail_5 - pattern and cookies */
	opts.addrs = NULL;
	opts.syms = NULL;
	opts.cnt = 0;
	opts.cookies = cookies;

	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
						     "ksys_*", &opts);
	saved_error = -errno;
	if (!ASSERT_ERR_PTR(link, "fail_5"))
		goto cleanup;

	if (!ASSERT_EQ(saved_error, -EINVAL, "fail_5_error"))
		goto cleanup;

	/* fail_6 - abnormal cnt */
	opts.addrs = (const unsigned long *) addrs;
	opts.syms = NULL;
	opts.cnt = INT_MAX;
	opts.cookies = NULL;

	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
						     NULL, &opts);
	saved_error = -errno;
	if (!ASSERT_ERR_PTR(link, "fail_6"))
		goto cleanup;

	if (!ASSERT_EQ(saved_error, -E2BIG, "fail_6_error"))
		goto cleanup;

cleanup:
	bpf_link__destroy(link);
	kprobe_multi__destroy(skel);
}

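/* Session programs run on both function entry and return; the entry
 * handler's return value decides whether the return handler runs.
 */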
static void test_session_skel_api(void)
{
	struct kprobe_multi_session *skel = NULL;
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct bpf_link *link = NULL;
	int i, err, prog_fd;

	skel = kprobe_multi_session__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_multi_session__open_and_load"))
		return;

	skel->bss->pid = getpid();

	err = kprobe_multi_session__attach(skel);
	if (!ASSERT_OK(err, "kprobe_multi_session__attach"))
		goto cleanup;

	prog_fd = bpf_program__fd(skel->progs.trigger);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");

	/* bpf_fentry_test1-4 trigger return probe, result is 2 */
	for (i = 0; i < 4; i++)
		ASSERT_EQ(skel->bss->kprobe_session_result[i], 2, "kprobe_session_result");

	/* bpf_fentry_test5-8 trigger only entry probe, result is 1 */
	for (i = 4; i < 8; i++)
		ASSERT_EQ(skel->bss->kprobe_session_result[i], 1, "kprobe_session_result");

cleanup:
	bpf_link__destroy(link);
	kprobe_multi_session__destroy(skel);
}

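/* Session cookies let the entry handler stash a value that the return
 * handler of the same probed function reads back.
 */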
static void test_session_cookie_skel_api(void)
{
	struct kprobe_multi_session_cookie *skel = NULL;
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct bpf_link *link = NULL;
	int err, prog_fd;

	skel = kprobe_multi_session_cookie__open_and_load();
	if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load"))
		return;

	skel->bss->pid = getpid();

	err = kprobe_multi_session_cookie__attach(skel);
	if (!ASSERT_OK(err, "kprobe_multi_session_cookie__attach"))
		goto cleanup;

	prog_fd = bpf_program__fd(skel->progs.trigger);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "test_run");

	ASSERT_EQ(skel->bss->test_kprobe_1_result, 1, "test_kprobe_1_result");
	ASSERT_EQ(skel->bss->test_kprobe_2_result, 2, "test_kprobe_2_result");
	ASSERT_EQ(skel->bss->test_kprobe_3_result, 3, "test_kprobe_3_result");

cleanup:
	bpf_link__destroy(link);
	kprobe_multi_session_cookie__destroy(skel);
}

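/* Hash and equality callbacks for the hashmap keyed by symbol name,
 * used below to deduplicate available_filter_functions entries.
 */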
static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
	return str_hash((const char *) key);
}

static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return strcmp((const char *) key1, (const char *) key2) == 0;
}

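/* Module symbols are listed with a trailing "[module]" tag, which lets
 * us keep either kernel-only or module-only entries.
 */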
static bool is_invalid_entry(char *buf, bool kernel)
{
	if (kernel && strchr(buf, '['))
		return true;
	if (!kernel && !strchr(buf, '['))
		return true;
	return false;
}

static bool skip_entry(char *name)
{
	/*
	 * We attach to almost all kernel functions and some of them
	 * will cause 'suspicious RCU usage' when fprobe is attached
	 * to them. Filter out the current culprits - arch_cpu_idle,
	 * default_idle and rcu_* functions.
	 */
	if (!strcmp(name, "arch_cpu_idle"))
		return true;
	if (!strcmp(name, "default_idle"))
		return true;
	if (!strncmp(name, "rcu_", 4))
		return true;
	if (!strcmp(name, "bpf_dispatcher_xdp_func"))
		return true;
	if (!strncmp(name, "__ftrace_invalid_address__",
		     sizeof("__ftrace_invalid_address__") - 1))
		return true;
	return false;
}

/* Do comparison by ignoring '.llvm.<hash>' suffixes. */
static int compare_name(const char *name1, const char *name2)
{
	const char *res1, *res2;
	int len1, len2;

	res1 = strstr(name1, ".llvm.");
	res2 = strstr(name2, ".llvm.");
	len1 = res1 ? res1 - name1 : strlen(name1);
	len2 = res2 ? res2 - name2 : strlen(name2);

	if (len1 == len2)
		return strncmp(name1, name2, len1);
	if (len1 < len2)
		return strncmp(name1, name2, len1) <= 0 ? -1 : 1;
	return strncmp(name1, name2, len2) >= 0 ? 1 : -1;
}

static int load_kallsyms_compare(const void *p1, const void *p2)
{
	return compare_name(((const struct ksym *)p1)->name, ((const struct ksym *)p2)->name);
}

static int search_kallsyms_compare(const void *p1, const struct ksym *p2)
{
	return compare_name(p1, p2->name);
}

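/* Collect attachable symbol names from available_filter_functions,
 * deduplicated and backed by the local kallsyms cache so the strings
 * stay valid for the caller.
 */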
static int get_syms(char ***symsp, size_t *cntp, bool kernel)
{
	size_t cap = 0, cnt = 0;
	char *name = NULL, *ksym_name, **syms = NULL;
	struct hashmap *map;
	struct ksyms *ksyms;
	struct ksym *ks;
	char buf[256];
	FILE *f;
	int err = 0;

	ksyms = load_kallsyms_custom_local(load_kallsyms_compare);
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_custom_local"))
		return -EINVAL;

	/*
	 * The available_filter_functions file contains many duplicates,
	 * but other than that all symbols are usable with the kprobe
	 * multi interface.
	 * Filter out the duplicates with hashmap__add, which won't add
	 * an already existing entry.
	 */

	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");

	if (!f)
		return -EINVAL;

	map = hashmap__new(symbol_hash, symbol_equal, NULL);
	if (IS_ERR(map)) {
		err = libbpf_get_error(map);
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		free(name);
		if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
			continue;
		if (skip_entry(name))
			continue;

		ks = search_kallsyms_custom_local(ksyms, name, search_kallsyms_compare);
		if (!ks) {
			err = -EINVAL;
			goto error;
		}

		ksym_name = ks->name;
		err = hashmap__add(map, ksym_name, 0);
		if (err == -EEXIST) {
			err = 0;
			continue;
		}
		if (err)
			goto error;

		err = libbpf_ensure_mem((void **) &syms, &cap,
					sizeof(*syms), cnt + 1);
		if (err)
			goto error;

		syms[cnt++] = ksym_name;
	}

	*symsp = syms;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	hashmap__free(map);
	if (err)
		free(syms);
	return err;
}

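/* Collect attachable function addresses from
 * available_filter_functions_addrs; returns -ENOENT when the kernel
 * does not expose that file, so the caller can skip the test.
 */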
static int get_addrs(unsigned long **addrsp, size_t *cntp, bool kernel)
{
	unsigned long *addr, *addrs, *tmp_addrs;
	int err = 0, max_cnt, inc_cnt;
	char *name = NULL;
	size_t cnt = 0;
	char buf[256];
	FILE *f;

	if (access("/sys/kernel/tracing/trace", F_OK) == 0)
		f = fopen("/sys/kernel/tracing/available_filter_functions_addrs", "r");
	else
		f = fopen("/sys/kernel/debug/tracing/available_filter_functions_addrs", "r");

	if (!f)
		return -ENOENT;

	/* A typical setup has 50k+ entries, so initially allocate space
	 * for 64k entries. If that is not enough, grow the array by 1k
	 * entries at a time.
	 */
	max_cnt = 65536;
	inc_cnt = 1024;
	addrs = malloc(max_cnt * sizeof(long));
	if (addrs == NULL) {
		err = -ENOMEM;
		goto error;
	}

	while (fgets(buf, sizeof(buf), f)) {
		if (is_invalid_entry(buf, kernel))
			continue;

		free(name);
		if (sscanf(buf, "%p %ms$*[^\n]\n", &addr, &name) != 2)
			continue;
		if (skip_entry(name))
			continue;

		if (cnt == max_cnt) {
			max_cnt += inc_cnt;
			tmp_addrs = realloc(addrs, max_cnt * sizeof(long));
			if (!tmp_addrs) {
				err = -ENOMEM;
				goto error;
			}
			addrs = tmp_addrs;
		}

		addrs[cnt++] = (unsigned long)addr;
	}

	*addrsp = addrs;
	*cntp = cnt;

error:
	free(name);
	fclose(f);
	if (err)
		free(addrs);
	return err;
}

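/* Attach the empty kprobe multi program to all of the collected
 * functions and report how long attach and detach take.
 */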
static void do_bench_test(struct kprobe_multi_empty *skel, struct bpf_kprobe_multi_opts *opts)
{
	long attach_start_ns, attach_end_ns;
	long detach_start_ns, detach_end_ns;
	double attach_delta, detach_delta;
	struct bpf_link *link = NULL;

	attach_start_ns = get_time_ns();
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty,
						     NULL, opts);
	attach_end_ns = get_time_ns();

	if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
		return;

	detach_start_ns = get_time_ns();
	bpf_link__destroy(link);
	detach_end_ns = get_time_ns();

	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;

	printf("%s: found %lu functions\n", __func__, opts->cnt);
	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
}

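/* Benchmark attaching by symbol name (and, in the _addr variant below,
 * by address) to all attachable kernel or module functions.
 */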
static void test_kprobe_multi_bench_attach(bool kernel)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct kprobe_multi_empty *skel = NULL;
	char **syms = NULL;
	size_t cnt = 0;

	if (!ASSERT_OK(get_syms(&syms, &cnt, kernel), "get_syms"))
		return;

	skel = kprobe_multi_empty__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
		goto cleanup;

	opts.syms = (const char **) syms;
	opts.cnt = cnt;

	do_bench_test(skel, &opts);

cleanup:
	kprobe_multi_empty__destroy(skel);
	if (syms)
		free(syms);
}

static void test_kprobe_multi_bench_attach_addr(bool kernel)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
	struct kprobe_multi_empty *skel = NULL;
	unsigned long *addrs = NULL;
	size_t cnt = 0;
	int err;

	err = get_addrs(&addrs, &cnt, kernel);
	if (err == -ENOENT) {
		test__skip();
		return;
	}

	if (!ASSERT_OK(err, "get_addrs"))
		return;

	skel = kprobe_multi_empty__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
		goto cleanup;

	opts.addrs = addrs;
	opts.cnt = cnt;

	do_bench_test(skel, &opts);

cleanup:
	kprobe_multi_empty__destroy(skel);
	free(addrs);
}

static void test_attach_override(void)
{
	struct kprobe_multi_override *skel = NULL;
	struct bpf_link *link = NULL;

	skel = kprobe_multi_override__open_and_load();
	if (!ASSERT_OK_PTR(skel, "kprobe_multi_override__open_and_load"))
		goto cleanup;

	/* The test_override program calls bpf_override_return, so it
	 * should fail to attach to the bpf_fentry_test1 function, which
	 * is not on the error injection list.
	 */
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
						     "bpf_fentry_test1", NULL);
	if (!ASSERT_ERR_PTR(link, "override_attached_bpf_fentry_test1")) {
		bpf_link__destroy(link);
		goto cleanup;
	}

	/* The should_fail_bio function is on the error injection list,
	 * so the attach should succeed.
	 */
	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_override,
						     "should_fail_bio", NULL);
	if (!ASSERT_OK_PTR(link, "override_attached_should_fail_bio"))
		goto cleanup;

	bpf_link__destroy(link);

cleanup:
	kprobe_multi_override__destroy(skel);
}

void serial_test_kprobe_multi_bench_attach(void)
{
	if (test__start_subtest("kernel"))
		test_kprobe_multi_bench_attach(true);
	if (test__start_subtest("modules"))
		test_kprobe_multi_bench_attach(false);
	if (test__start_subtest("kernel"))
		test_kprobe_multi_bench_attach_addr(true);
	if (test__start_subtest("modules"))
		test_kprobe_multi_bench_attach_addr(false);
}

void test_kprobe_multi_test(void)
{
	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
		return;

	if (test__start_subtest("skel_api"))
		test_skel_api();
	if (test__start_subtest("link_api_addrs"))
		test_link_api_addrs();
	if (test__start_subtest("link_api_syms"))
		test_link_api_syms();
	if (test__start_subtest("attach_api_pattern"))
		test_attach_api_pattern();
	if (test__start_subtest("attach_api_addrs"))
		test_attach_api_addrs();
	if (test__start_subtest("attach_api_syms"))
		test_attach_api_syms();
	if (test__start_subtest("attach_api_fails"))
		test_attach_api_fails();
	if (test__start_subtest("attach_override"))
		test_attach_override();
	if (test__start_subtest("session"))
		test_session_skel_api();
	if (test__start_subtest("session_cookie"))
		test_session_cookie_skel_api();
}
768