// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <error.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>

#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#include "test_sockmap_skb_verdict_attach.skel.h"
#include "test_sockmap_progs_query.skel.h"
#include "test_sockmap_pass_prog.skel.h"
#include "test_sockmap_drop_prog.skel.h"
#include "bpf_iter_sockmap.skel.h"

#include "sockmap_helpers.h"

#define TCP_REPAIR		19	/* TCP sock is under repair right now */

#define TCP_REPAIR_ON		1
#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */

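/* Create a TCP socket that appears connected to 127.0.0.1:80 without a real
 * peer: in TCP_REPAIR mode connect() only sets up socket state, no packets
 * are exchanged.
 */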
static int connected_socket_v4(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(80),
		.sin_addr = { inet_addr("127.0.0.1") },
	};
	socklen_t len = sizeof(addr);
	int s, repair, err;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (!ASSERT_GE(s, 0, "socket"))
		goto error;

	repair = TCP_REPAIR_ON;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	err = connect(s, (struct sockaddr *)&addr, len);
	if (!ASSERT_OK(err, "connect"))
		goto error;

	repair = TCP_REPAIR_OFF_NO_WP;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	return s;
error:
	perror(__func__);
	close(s);
	return -1;
}

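/* Check that dst mirrors src: every index either holds a socket with the
 * same cookie in both maps or is absent from both.
 */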
static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
	__u32 i, max_entries = bpf_map__max_entries(src);
	int err, src_fd, dst_fd;

	src_fd = bpf_map__fd(src);
	dst_fd = bpf_map__fd(dst);

	for (i = 0; i < max_entries; i++) {
		__u64 src_cookie, dst_cookie;

		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
		if (err && errno == ENOENT) {
			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
			ASSERT_ERR(err, "map_lookup_elem(dst)");
			ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
			continue;
		}
		if (!ASSERT_OK(err, "lookup_elem(src)"))
			continue;

		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
		if (!ASSERT_OK(err, "lookup_elem(dst)"))
			continue;

		ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
	}
}

/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
	const int zero = 0;
	int s, map, err;

	s = connected_socket_v4();
	if (!ASSERT_GE(s, 0, "connected_socket_v4"))
		return;

	map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
	if (!ASSERT_GE(map, 0, "bpf_map_create"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update"))
		goto out;

out:
	close(map);
	close(s);
}

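/* Attach and detach a msg_verdict program using the legacy
 * bpf_prog_attach()/bpf_prog_detach2() API.
 */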
static void test_skmsg_helpers(enum bpf_map_type map_type)
{
	struct test_skmsg_load_helpers *skel;
	int err, map, verdict;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_skmsg_load_helpers__destroy(skel);
}

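/* Exercise bpf_link attachment for msg_verdict programs: re-attaching the
 * same program or program type must fail, updating the link with a program
 * of a different type must fail, and BPF_F_REPLACE must only succeed when
 * the expected old program matches.
 */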
static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
{
	struct bpf_program *prog, *prog_clone, *prog_clone2;
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
	struct test_skmsg_load_helpers *skel;
	struct bpf_link *link, *link2;
	int err, map;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	prog = skel->progs.prog_msg_verdict;
	prog_clone = skel->progs.prog_msg_verdict_clone;
	prog_clone2 = skel->progs.prog_msg_verdict_clone2;
	map = bpf_map__fd(skel->maps.sock_map);

	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	/* Fail since bpf_link for the same prog has been created. */
	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_ERR(err, "bpf_prog_attach"))
		goto out;

	/* Fail since bpf_link for the same prog type has been created. */
	link2 = bpf_program__attach_sockmap(prog_clone, map);
	if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) {
		bpf_link__detach(link2);
		goto out;
	}

	err = bpf_link__update_program(link, prog_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with different type attempts to do update. */
	err = bpf_link__update_program(link, skel->progs.prog_skb_verdict);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	/* Fail since the old prog does not match the one in the kernel. */
	opts.old_prog_fd = bpf_program__fd(prog_clone2);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_ERR(err, "bpf_link_update"))
		goto out;

	opts.old_prog_fd = bpf_program__fd(prog_clone);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_OK(err, "bpf_link_update"))
		goto out;
out:
	bpf_link__detach(link);
	test_skmsg_load_helpers__destroy(skel);
}

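/* Run a BPF program that copies the src map into a destination sockmap or
 * sockhash via bpf_prog_test_run(), then compare socket cookies.
 */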
static void test_sockmap_update(enum bpf_map_type map_type)
{
	int err, prog, src;
	struct test_sockmap_update *skel;
	struct bpf_map *dst_map;
	const __u32 zero = 0;
	char dummy[14] = {0};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = dummy,
		.data_size_in = sizeof(dummy),
		.repeat = 1,
	);
	__s64 sk;

	sk = connected_socket_v4();
	if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
		return;

	skel = test_sockmap_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto close_sk;

	prog = bpf_program__fd(skel->progs.copy_sock_map);
	src = bpf_map__fd(skel->maps.src);
	if (map_type == BPF_MAP_TYPE_SOCKMAP)
		dst_map = skel->maps.dst_sock_map;
	else
		dst_map = skel->maps.dst_sock_hash;

	err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
	if (!ASSERT_OK(err, "update_elem(src)"))
		goto out;

	err = bpf_prog_test_run_opts(prog, &topts);
	if (!ASSERT_OK(err, "test_run"))
		goto out;
	if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
		goto out;

	compare_cookies(skel->maps.src, dst_map);

out:
	test_sockmap_update__destroy(skel);
close_sk:
	close(sk);
}

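/* A program updating a sockmap from an unsafe context must be rejected by
 * the verifier, so open_and_load() is expected to fail.
 */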
static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;

	skel = test_sockmap_invalid_update__open_and_load();
	if (!ASSERT_NULL(skel, "open_and_load"))
		test_sockmap_invalid_update__destroy(skel);
}

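/* Iterate a sockmap/sockhash with a bpf_iter program that copies entries
 * into dst, then check the element/socket counts and compare cookies.
 */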
static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
		goto out;

	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* Drain the iterator so the copy program visits every element. */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	if (sock_fd)
		free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}

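/* Attaching a second skb verdict program of a conflicting type must fail
 * with EBUSY while the first one is still attached.
 */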
static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
					    enum bpf_attach_type second)
{
	struct test_sockmap_skb_verdict_attach *skel;
	int err, map, verdict;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, first, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_attach(verdict, map, second, 0);
	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");

	err = bpf_prog_detach2(verdict, map, first);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

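/* Detaching a bpf_link frees the slot for a legacy attach, after which a
 * second link attach for the same prog/map must fail.
 */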
static void test_sockmap_skb_verdict_attach_with_link(void)
{
	struct test_sockmap_skb_verdict_attach *skel;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;
	prog = skel->progs.prog_skb_verdict;
	map = bpf_map__fd(skel->maps.sock_map);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	bpf_link__detach(link);

	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	/* Fail since attaching with the same prog/map has been done. */
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap"))
		bpf_link__detach(link);

	err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

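/* Return the id of the program referenced by prog_fd, or 0 on error. */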
static __u32 query_prog_id(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
	    !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
		return 0;

	return info.id;
}

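/* Query the attached programs before and after attaching a verdict program
 * and verify the reported count and program id.
 */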
static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
{
	struct test_sockmap_progs_query *skel;
	int err, map_fd, verdict_fd;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;

	skel = test_sockmap_progs_query__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sock_map);

	if (attach_type == BPF_SK_MSG_VERDICT)
		verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
	else
		verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);

	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 0, "wrong program count on query");

	err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach failed"))
		goto out;

	prog_cnt = 1;
	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
	ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
		  "wrong prog_ids on query");

	bpf_prog_detach2(verdict_fd, map_fd, attach_type);
out:
	test_sockmap_progs_query__destroy(skel);
}

#define MAX_EVENTS 10
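/* A FIN received through an skb verdict program should wake up epoll and
 * make recv() return 0 (EOF).
 */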
static void test_sockmap_skb_verdict_shutdown(void)
{
	int n, err, map, verdict, c1 = -1, p1 = -1;
	struct epoll_event ev, events[MAX_EVENTS];
	struct test_sockmap_pass_prog *skel;
	int zero = 0;
	int epollfd;
	char b;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
	if (err < 0)
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (err < 0)
		goto out_close;

	shutdown(p1, SHUT_WR);

	ev.events = EPOLLIN;
	ev.data.fd = c1;

	epollfd = epoll_create1(0);
	if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
		goto out_close;
	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
	if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
		goto out_close;
	err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
	if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
		goto out_close;

	n = recv(c1, &b, 1, MSG_DONTWAIT);
	ASSERT_EQ(n, 0, "recv(fin)");
out_close:
	close(c1);
	close(p1);
out:
	test_sockmap_pass_prog__destroy(skel);
}

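/* FIONREAD should report the bytes queued by a pass verdict program and
 * zero when the verdict program drops the data.
 */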
static void test_sockmap_skb_verdict_fionread(bool pass_prog)
{
	int err, map, verdict, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
	int expected, zero = 0, sent, recvd, avail;
	struct test_sockmap_pass_prog *pass = NULL;
	struct test_sockmap_drop_prog *drop = NULL;
	char buf[256] = "0123456789";

	if (pass_prog) {
		pass = test_sockmap_pass_prog__open_and_load();
		if (!ASSERT_OK_PTR(pass, "open_and_load"))
			return;
		verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
		map = bpf_map__fd(pass->maps.sock_map_rx);
		expected = sizeof(buf);
	} else {
		drop = test_sockmap_drop_prog__open_and_load();
		if (!ASSERT_OK_PTR(drop, "open_and_load"))
			return;
		verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
		map = bpf_map__fd(drop->maps.sock_map_rx);
		/* On drop data is consumed immediately and copied_seq inc'd */
		expected = 0;
	}

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = create_socket_pairs(AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
	if (!ASSERT_OK(err, "create_socket_pairs()"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, &buf, sizeof(buf), 0);
	ASSERT_EQ(sent, sizeof(buf), "xsend(p1)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
	/* On DROP test there will be no data to read */
	if (pass_prog) {
		recvd = recv_timeout(c1, &buf, sizeof(buf), MSG_DONTWAIT, IO_TIMEOUT_SEC);
		ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c1)");
	}

out_close:
	close(c0);
	close(p0);
	close(c1);
	close(p1);
out:
	if (pass_prog)
		test_sockmap_pass_prog__destroy(pass);
	else
		test_sockmap_drop_prog__destroy(drop);
}

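/* recv(MSG_PEEK) must leave the data queued: FIONREAD still reports the
 * full payload afterwards, and a normal recv() then drains it to zero.
 */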
static void test_sockmap_skb_verdict_peek_helper(int map)
{
	int err, c1, p1, zero = 0, sent, recvd, avail;
	char snd[256] = "0123456789";
	char rcv[256] = "0";

	err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
	if (!ASSERT_OK(err, "create_pair()"))
		return;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, snd, sizeof(snd), 0);
	ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
	recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
	recvd = recv(c1, rcv, sizeof(rcv), 0);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");

out_close:
	close(c1);
	close(p1);
}

static void test_sockmap_skb_verdict_peek(void)
{
	struct test_sockmap_pass_prog *pass;
	int err, map, verdict;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
	map = bpf_map__fd(pass->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);

out:
	test_sockmap_pass_prog__destroy(pass);
}

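/* Same peek test, but attached via bpf_link and updated to the clone
 * program first; a parser program must be rejected by the update.
 */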
static void test_sockmap_skb_verdict_peek_with_link(void)
{
	struct test_sockmap_pass_prog *pass;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	prog = pass->progs.prog_skb_verdict;
	map = bpf_map__fd(pass->maps.sock_map_rx);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	err = bpf_link__update_program(link, pass->progs.prog_skb_verdict_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with different attach type attempts to do update. */
	err = bpf_link__update_program(link, pass->progs.prog_skb_parser);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);
	ASSERT_EQ(pass->bss->clone_called, 1, "clone_called");
out:
	bpf_link__detach(link);
	test_sockmap_pass_prog__destroy(pass);
}

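/* An unconnected AF_UNIX stream socket cannot be added to a sockmap, while
 * an unconnected AF_UNIX datagram socket can.
 */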
static void test_sockmap_unconnected_unix(void)
{
	int err, map, stream = 0, dgram = 0, zero = 0;
	struct test_sockmap_pass_prog *skel;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
	if (stream < 0)
		return;

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		close(stream);
		return;
	}

	err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
	ASSERT_ERR(err, "bpf_map_update_elem(stream)");

	err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
	ASSERT_OK(err, "bpf_map_update_elem(dgram)");

	close(stream);
	close(dgram);
}

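/* Insert the same sockets into several slots of a single map, then delete
 * every entry.
 */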
static void test_sockmap_many_socket(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map, &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

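/* Insert the same sockets into two maps (rx and tx), then delete the
 * entries from both.
 */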
static void test_sockmap_many_maps(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map[2], entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map[0] = bpf_map__fd(skel->maps.sock_map_rx);
	map[1] = bpf_map__fd(skel->maps.sock_map_tx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
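	/* Entries alternate between map[0] and map[1]; walk them back down,
	 * deleting from each map in turn.
	 */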
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map[1], &entry);
		entry--;
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
		err = bpf_map_delete_elem(map[0], &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

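/* Repeatedly replace index zero of the same map with sockets of different
 * types, then delete the entry once.
 */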
static void test_sockmap_same_sock(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, zero = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}

	err = bpf_map_delete_elem(map, &zero);
	ASSERT_OK(err, "bpf_map_delete_elem(entry)");

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict attach")) {
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
	if (test__start_subtest("sockmap skb_verdict attach_with_link"))
		test_sockmap_skb_verdict_attach_with_link();
	if (test__start_subtest("sockmap msg_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
	if (test__start_subtest("sockmap stream_parser progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
	if (test__start_subtest("sockmap stream_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
	if (test__start_subtest("sockmap skb_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
	if (test__start_subtest("sockmap skb_verdict shutdown"))
		test_sockmap_skb_verdict_shutdown();
	if (test__start_subtest("sockmap skb_verdict fionread"))
		test_sockmap_skb_verdict_fionread(true);
	if (test__start_subtest("sockmap skb_verdict fionread on drop"))
		test_sockmap_skb_verdict_fionread(false);
	if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
		test_sockmap_skb_verdict_peek();
	if (test__start_subtest("sockmap skb_verdict msg_f_peek with link"))
		test_sockmap_skb_verdict_peek_with_link();
	if (test__start_subtest("sockmap unconnected af_unix"))
		test_sockmap_unconnected_unix();
	if (test__start_subtest("sockmap one socket to many map entries"))
		test_sockmap_many_socket();
	if (test__start_subtest("sockmap one socket to many maps"))
		test_sockmap_many_maps();
	if (test__start_subtest("sockmap same socket replace"))
		test_sockmap_same_sock();
	if (test__start_subtest("sockmap sk_msg attach sockmap helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg attach sockhash helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH);
}