// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_tasks.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vmas.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

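/* Attach @prog as an iterator with the given attach options, create an
 * iterator fd from the resulting link, and drain it, only checking that
 * read() terminates without an error.
 */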
static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* don't check contents, just ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read");

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}

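/* Attach a map iterator to @map, then destroy the link and the whole
 * skeleton (closing the map fd) before reading, to verify that an open
 * iterator fd keeps the underlying objects usable on its own.
 */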
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close link and map fd prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Let the map freeing work run first, if the map is indeed freed */
	usleep(100);
	/* Memory used by both sock map and sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for them.
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both map fd and link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}

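/* Read from @fd into @buf until EOF or error; return the total number of
 * bytes read on success, or the (negative) last read() return value on
 * failure.
 */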
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

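/* Attach @prog as a task iterator filtered to the current tid and check
 * that bpf_link_get_info_by_fd() reports that tid back in the link info.
 */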
static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_link_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

static pthread_mutex_t do_nothing_mutex;

static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}

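/* Run the dump_task iterator with @opts while one extra thread is blocked
 * on do_nothing_mutex, and report how many visited tids matched
 * (num_known) and did not match (num_unknown) the recorded tid.
 */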
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_tasks *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_tasks__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = sys_gettid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_tasks__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}

static void *run_test_task_tid(void *arg)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	ASSERT_NEQ(getpid(), sys_gettid(), "check_new_thread_id");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = sys_gettid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	/* This includes the parent thread, this thread, watchdog timer thread
	 * and the do_nothing_wait thread
	 */
	test_task_common(&opts, 3, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");

	return NULL;
}

static void test_task_tid(void)
{
	pthread_t thread_id;

	/* Create a new thread so pid and tid aren't the same */
	ASSERT_OK(pthread_create(&thread_id, NULL, &run_test_task_tid, NULL),
		  "pthread_create");
	ASSERT_FALSE(pthread_join(thread_id, NULL), "pthread_join");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 2, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 2, 1);

	close(pidfd);
}

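/* Fork a child that passes two user-space buffers back over a pipe, then
 * run the sleepable dump_task_sleepable iterator against the child's pid
 * and check the expected-failure and success counters for both the plain
 * and the _str copy_from_user_task paths.
 */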
static void test_task_sleepable(void)
{
	struct bpf_iter_tasks *skel;
	int pid, status, err, data_pipe[2], finish_pipe[2], c = 0;
	char *test_data = NULL;
	char *test_data_long = NULL;
	char *data[2];

	if (!ASSERT_OK(pipe(data_pipe), "data_pipe") ||
	    !ASSERT_OK(pipe(finish_pipe), "finish_pipe"))
		return;

	skel = bpf_iter_tasks__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
		return;

	pid = fork();
	if (!ASSERT_GE(pid, 0, "fork"))
		return;

	if (pid == 0) {
		/* child */
		close(data_pipe[0]);
		close(finish_pipe[1]);

		test_data = malloc(sizeof(char) * 10);
		strscpy(test_data, "test_data", 10);

		test_data_long = malloc(sizeof(char) * 5000);
		for (int i = 0; i < 5000; ++i) {
			if (i % 2 == 0)
				test_data_long[i] = 'b';
			else
				test_data_long[i] = 'a';
		}
		test_data_long[4999] = '\0';

		data[0] = test_data;
		data[1] = test_data_long;

		write(data_pipe[1], &data, sizeof(data));

		/* keep child alive until after the test */
		err = read(finish_pipe[0], &c, 1);
		if (err != 1)
			exit(-1);

		close(data_pipe[1]);
		close(finish_pipe[0]);
		_exit(0);
	}

	/* parent */
	close(data_pipe[1]);
	close(finish_pipe[0]);

	err = read(data_pipe[0], &data, sizeof(data));
	ASSERT_EQ(err, sizeof(data), "read_check");

	skel->bss->user_ptr = data[0];
	skel->bss->user_ptr_long = data[1];
	skel->bss->pid = pid;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");
	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task_str, 0,
		  "num_expected_failure_copy_from_user_task_str");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task_str, 0,
		  "num_success_copy_from_user_task_str");

	bpf_iter_tasks__destroy(skel);

	write(finish_pipe[1], &c, 1);
	err = waitpid(pid, &status, 0);
	ASSERT_EQ(err, pid, "waitpid");
	ASSERT_EQ(status, 0, "zero_child_exit");

	close(data_pipe[0]);
	close(finish_pipe[1]);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");

	bpf_iter_task_stack__destroy(skel);
}

static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ 32768

static char taskbuf[TASKBUFSZ];

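/* Read the dump_task_struct iterator output into taskbuf and check that it
 * contains a BTF-formatted task_struct; skip the test if the program was
 * built without __builtin_btf_type_id support.
 */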
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (!ASSERT_GE(err, 0, "read"))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
			  "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (!ASSERT_LT(start, 16, "read"))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (!ASSERT_GE(len, 0, "read"))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) {
		goto out;
	}

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (!ASSERT_GE(iter_fd, 0, "open"))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (!ASSERT_OK(err, "pin_iter"))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to be working fine. Let us do a link
	 * update of the underlying link and `cat` the iterator again; its
	 * content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

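/* Exercise seq_file buffer handling: two array maps are dumped with a
 * configurable per-map print length so that the output either just fits,
 * overflows with -E2BIG, or is limited when the program returns 1.
 */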
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is that one map's output almost
	 * fills the seq_file buffer and the other then triggers the
	 * overflow and needs a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (!ASSERT_GE(map1_fd, 0, "bpf_map_create"))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (!ASSERT_GE(map2_fd, 0, "bpf_map_create"))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
	 * bpf_seq_write will mostly fill it, and the other map will
	 * partially fill it, trigger the overflow and need a
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		       "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (!ASSERT_OK(err, "get_map_info"))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (!ASSERT_OK(err, "get_map_info"))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!ASSERT_OK_PTR(buf, "malloc"))
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		ASSERT_EQ(len, -1, "read");
		ASSERT_EQ(errno, E2BIG, "read");
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (!ASSERT_GE(len, 0, "read"))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || len == -EAGAIN);

		if (!ASSERT_GE(len, 0, "read"))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable program is prohibited for hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 key, expected_key = 0, res_first_key;
	int err, i, map_fd, hash_fd, iter_fd;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (!ASSERT_EQ(res_first_key, 0, "bpf_seq_write") ||
	    !ASSERT_EQ(res_first_val, first_val, "bpf_seq_write"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	hash_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup arraymap1"))
			goto close_iter;
		if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
			goto close_iter;

		val = i + 4;
		err = bpf_map_lookup_elem(hash_fd, &val, &key);
		if (!ASSERT_OK(err, "map_lookup hashmap1"))
			goto close_iter;
		if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);

	/* Note: The following assertions serve to ensure
	 * the value was deleted. They do so by asserting
	 * that bpf_map_lookup_elem has failed. This might
	 * seem counterintuitive at first.
	 */
	ASSERT_ERR(err, "bpf_map_lookup_elem");
	ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem");

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of the
 * only task owning a file descriptor to this socket, this process, prog_tests.
 * It then runs a tcp socket iterator that negates the value in the existing
 * socket local storage; the test verifies that the resulting value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
	    !ASSERT_EQ(val, getpid(), "bpf_map_lookup_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	ASSERT_OK(err, "bpf_map_lookup_elem");
	ASSERT_EQ(val, -getpid(), "bpf_map_lookup_elem");

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

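/* Attaching the test_kern5 dump_bpf_hash_map program, which accesses the
 * read-only seq buffer out of bounds, is expected to fail.
 */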
static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove spaces and tabs from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

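/* Dump this process's VMAs through the proc_maps iterator in small chunks
 * and compare the first line against /proc/<pid>/maps, optionally
 * restricting the iterator to a single task via @opts.
 */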
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vmas *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vmas__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vmas__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vmas__destroy(skel);
}

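/* Keep re-creating and draining a task_vma iterator for a few seconds while
 * a child forks short-lived processes, so some iterated tasks exit while
 * the iterator is in use.
 */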
static void test_task_vma_dead_task(void)
{
	struct bpf_iter_task_vmas *skel;
	int wstatus, child_pid = -1;
	time_t start_tm, cur_tm;
	int err, iter_fd = -1;
	int wait_sec = 3;

	skel = bpf_iter_task_vmas__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
		return;

	skel->bss->pid = getpid();

	err = bpf_iter_task_vmas__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	start_tm = time(NULL);
	cur_tm = start_tm;

	child_pid = fork();
	if (child_pid == 0) {
		/* Fork short-lived processes in the background. */
		while (cur_tm < start_tm + wait_sec) {
			system("echo > /dev/null");
			cur_tm = time(NULL);
		}
		exit(0);
	}

	if (!ASSERT_GE(child_pid, 0, "fork_child"))
		goto out;

	while (cur_tm < start_tm + wait_sec) {
		iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
		if (!ASSERT_GE(iter_fd, 0, "create_iter"))
			goto out;

		/* Drain all data from iter_fd. */
		while (cur_tm < start_tm + wait_sec) {
			err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
			if (!ASSERT_GE(err, 0, "read_iter_fd"))
				goto out;

			cur_tm = time(NULL);

			if (err == 0)
				break;
		}

		close(iter_fd);
		iter_fd = -1;
	}

	check_bpf_link_info(skel->progs.proc_maps);

out:
	waitpid(child_pid, &wstatus, 0);
	close(iter_fd);
	bpf_iter_task_vmas__destroy(skel);
}

void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}

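/* Use the get_vma_offset iterator to compute the file offset of
 * trigger_func within its VMA and compare it with get_uprobe_offset(),
 * also checking how many unique tgids were visited.
 */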
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
		goto exit;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_iter_vma_offset__destroy(skel);
}

static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_vma_dead_task"))
		test_task_vma_dead_task();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}