// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

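/* "duration" is referenced by the CHECK() macro from test_progs.h. */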
static int duration;

static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

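	/* Negative test: the kernel-side program is expected to be rejected
	 * by the verifier, so a successful open_and_load() is a failure.
	 */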
	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

static void do_dummy_read(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* don't check contents, just ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

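/* Exercise reading from an iterator whose link and map fds have already
 * been closed: the open iterator fd must keep the map alive and the
 * read must still finish without error.
 */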
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close link and map fd prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Give the map-free work a chance to run first if the map was freed */
	usleep(100);
	/* Memory used by both the sock map and the sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for that
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both the map fd and the link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}

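/* Drain fd into buf, reading at most size bytes in total; returns the
 * number of bytes read, or a negative value (with errno set) on failure.
 */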
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

static void test_task(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task);

	bpf_iter_task__destroy(skel);
}

static void test_task_sleepable(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	bpf_iter_task_stack__destroy(skel);
}

static void *do_nothing(void *arg)
{
	pthread_exit(arg);
}

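/* Run the task_file iterator against this process while a second thread
 * exists; the BPF program counts file entries attributed to a
 * non-group-leader thread of this process, and that count is expected
 * to stay zero.
 */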
static void test_task_file(void)
{
	struct bpf_iter_task_file *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
		  "pthread_create"))
		goto done;

	do_dummy_read(skel->progs.dump_task_file);

	if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		  "pthread_join"))
		goto done;

	ASSERT_EQ(skel->bss->count, 0, "check_count");

done:
	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ		32768

static char taskbuf[TASKBUFSZ];

static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
	      "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd >= 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

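/* Anonymous iterator: the iterator fd is created directly from the link
 * fd, without pinning anything in bpffs.
 */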
static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

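/* File-based iterator: pin the iterator link in bpffs and read it back
 * through the pinned path like a regular file.
 */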
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to be working fine. Let's do a link
	 * update of the underlying link and `cat` the iterator again; its
	 * content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

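/* Exercise seq_file buffer handling. Depending on the arguments: the
 * normal case reads two records that almost fill the 8-page buffer
 * (forcing a restart), the e2big case makes a single record larger than
 * the buffer so read() must fail with E2BIG, and the ret1 case makes
 * the program return 1, which the reader below handles by retrying.
 */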
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is for one map's output to almost
	 * fill the seq_file buffer so that the other triggers an overflow
	 * and needs a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map1_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map2_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
	 * bpf_seq_write will mostly fill it, and the other map will
	 * partially fill it, trigger an overflow and need a
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load"))
		goto free_map2;


	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
			  len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || (len == -1 && errno == EAGAIN));

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* Iterators with hashmap2 and hashmap3 should fail to attach: their
	 * key/value layouts don't match what dump_bpf_hash_map expects.
	 */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* A sleepable program is prohibited for a hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

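	/* Per-CPU values are exchanged as a flat array with an 8-byte stride
	 * per possible CPU; only the low 4 bytes of each slot are used here.
	 */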
	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 expected_key = 0, res_first_key;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	int err, i, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  "first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

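	/* The iterator program rewrote each value to its key, so a lookup
	 * should now return val == i for every index.
	 */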
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup"))
			goto out;
		if (!ASSERT_EQ(i, val, "invalid_val"))
			goto out;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket (this process,
 * prog_tests). It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage; the test verifies that the resulting
 * value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
	    "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	    getpid(), val, err))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

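	/* A program that writes out of bounds of the storage value must be
	 * rejected at attach time with -EACCES.
	 */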
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

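	/* The program reads past the end of the read-only value buffer, so
	 * attaching it to this map is expected to fail.
	 */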
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

static void test_task_vma(void)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vma *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vma__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
		return;

	skel->bss->pid = getpid();

	err = bpf_iter_task_vma__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, sizeof(maps_path), "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
out:
	if (proc_maps_fd >= 0)
		close(proc_maps_fd);
	if (iter_fd >= 0)
		close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

static void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

void test_bpf_iter(void)
{
	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task"))
		test_task();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
}