// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <error.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>

#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#include "test_sockmap_skb_verdict_attach.skel.h"
#include "test_sockmap_progs_query.skel.h"
#include "test_sockmap_pass_prog.skel.h"
#include "test_sockmap_drop_prog.skel.h"
#include "bpf_iter_sockmap.skel.h"

#include "sockmap_helpers.h"

#define TCP_REPAIR		19	/* TCP sock is under repair right now */

#define TCP_REPAIR_ON		1
#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */

static int connected_socket_v4(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(80),
		.sin_addr = { inet_addr("127.0.0.1") },
	};
	socklen_t len = sizeof(addr);
	int s, repair, err;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (!ASSERT_GE(s, 0, "socket"))
		goto error;

	repair = TCP_REPAIR_ON;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	err = connect(s, (struct sockaddr *)&addr, len);
	if (!ASSERT_OK(err, "connect"))
		goto error;

	repair = TCP_REPAIR_OFF_NO_WP;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	return s;
error:
	perror(__func__);
	close(s);
	return -1;
}

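/* Check that every index populated in src holds the same socket cookie in
 * dst, and that indexes missing from src are also missing from dst.
 */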
static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
	__u32 i, max_entries = bpf_map__max_entries(src);
	int err, src_fd, dst_fd;

	src_fd = bpf_map__fd(src);
	dst_fd = bpf_map__fd(dst);

	for (i = 0; i < max_entries; i++) {
		__u64 src_cookie, dst_cookie;

		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
		if (err && errno == ENOENT) {
			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
			ASSERT_ERR(err, "map_lookup_elem(dst)");
			ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
			continue;
		}
		if (!ASSERT_OK(err, "lookup_elem(src)"))
			continue;

		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
		if (!ASSERT_OK(err, "lookup_elem(dst)"))
			continue;

		ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
	}
}

/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
	const int zero = 0;
	int s, map, err;

	s = connected_socket_v4();
	if (!ASSERT_GE(s, 0, "connected_socket_v4"))
		return;

	map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
	if (!ASSERT_GE(map, 0, "bpf_map_create"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update"))
		goto out;

out:
	close(map);
	close(s);
}

static void test_skmsg_helpers(enum bpf_map_type map_type)
{
	struct test_skmsg_load_helpers *skel;
	int err, map, verdict;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_skmsg_load_helpers__destroy(skel);
}

static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
{
	struct bpf_program *prog, *prog_clone, *prog_clone2;
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
	struct test_skmsg_load_helpers *skel;
	struct bpf_link *link, *link2;
	int err, map;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	prog = skel->progs.prog_msg_verdict;
	prog_clone = skel->progs.prog_msg_verdict_clone;
	prog_clone2 = skel->progs.prog_msg_verdict_clone2;
	map = bpf_map__fd(skel->maps.sock_map);

	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	/* Fail since a bpf_link for the same prog has already been created. */
	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_ERR(err, "bpf_prog_attach"))
		goto out;

	/* Fail since a bpf_link for the same prog type has already been created. */
	link2 = bpf_program__attach_sockmap(prog_clone, map);
	if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) {
		bpf_link__detach(link2);
		goto out;
	}

	err = bpf_link__update_program(link, prog_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog of a different type attempts the update. */
	err = bpf_link__update_program(link, skel->progs.prog_skb_verdict);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	/* Fail since the old prog does not match the one in the kernel. */
	opts.old_prog_fd = bpf_program__fd(prog_clone2);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_ERR(err, "bpf_link_update"))
		goto out;

	opts.old_prog_fd = bpf_program__fd(prog_clone);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_OK(err, "bpf_link_update"))
		goto out;
out:
	bpf_link__detach(link);
	test_skmsg_load_helpers__destroy(skel);
}

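/* Run the copy_sock_map program once with bpf_prog_test_run_opts() and
 * verify that the destination map ends up with the same socket cookie as
 * the source map.
 */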
static void test_sockmap_update(enum bpf_map_type map_type)
{
	int err, prog, src;
	struct test_sockmap_update *skel;
	struct bpf_map *dst_map;
	const __u32 zero = 0;
	char dummy[14] = {0};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = dummy,
		.data_size_in = sizeof(dummy),
		.repeat = 1,
	);
	__s64 sk;

	sk = connected_socket_v4();
	if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
		return;

	skel = test_sockmap_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto close_sk;

	prog = bpf_program__fd(skel->progs.copy_sock_map);
	src = bpf_map__fd(skel->maps.src);
	if (map_type == BPF_MAP_TYPE_SOCKMAP)
		dst_map = skel->maps.dst_sock_map;
	else
		dst_map = skel->maps.dst_sock_hash;

	err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
	if (!ASSERT_OK(err, "update_elem(src)"))
		goto out;

	err = bpf_prog_test_run_opts(prog, &topts);
	if (!ASSERT_OK(err, "test_run"))
		goto out;
	if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
		goto out;

	compare_cookies(skel->maps.src, dst_map);

out:
	test_sockmap_update__destroy(skel);
close_sk:
	close(sk);
}

static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;

	skel = test_sockmap_invalid_update__open_and_load();
	if (!ASSERT_NULL(skel, "open_and_load"))
		test_sockmap_invalid_update__destroy(skel);
}

static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
		goto out;

	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

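	/* The iterator walks the map passed in via bpf_iter_link_info; the
	 * BPF program copies every socket it visits into skel->maps.dst.
	 */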
	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* Drain the iterator output; the BPF program copies each visited socket. */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	if (sock_fd)
		free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}

static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
					    enum bpf_attach_type second)
{
	struct test_sockmap_skb_verdict_attach *skel;
	int err, map, verdict;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, first, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_attach(verdict, map, second, 0);
	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");

	err = bpf_prog_detach2(verdict, map, first);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

static void test_sockmap_skb_verdict_attach_with_link(void)
{
	struct test_sockmap_skb_verdict_attach *skel;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;
	prog = skel->progs.prog_skb_verdict;
	map = bpf_map__fd(skel->maps.sock_map);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	bpf_link__detach(link);

	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	/* Fail since the same prog has already been attached to this map. */
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap"))
		bpf_link__detach(link);

	err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

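/* Return the program ID for a prog fd, so bpf_prog_query() results can be
 * compared against the program that was attached.
 */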
static __u32 query_prog_id(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
	    !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
		return 0;

	return info.id;
}

static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
{
	struct test_sockmap_progs_query *skel;
	int err, map_fd, verdict_fd;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;

	skel = test_sockmap_progs_query__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sock_map);

	if (attach_type == BPF_SK_MSG_VERDICT)
		verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
	else
		verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);

	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 0, "wrong program count on query");

	err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach failed"))
		goto out;

	prog_cnt = 1;
	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
	ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
		  "wrong prog_ids on query");

	bpf_prog_detach2(verdict_fd, map_fd, attach_type);
out:
	test_sockmap_progs_query__destroy(skel);
}

#define MAX_EVENTS 10
static void test_sockmap_skb_verdict_shutdown(void)
{
	struct epoll_event ev, events[MAX_EVENTS];
	int n, err, map, verdict, s, c1 = -1, p1 = -1;
	struct test_sockmap_pass_prog *skel;
	int epollfd;
	int zero = 0;
	char b;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	s = socket_loopback(AF_INET, SOCK_STREAM);
	if (s < 0)
		goto out;
	err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
	if (err < 0)
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (err < 0)
		goto out_close;

	shutdown(p1, SHUT_WR);

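	/* The peer's FIN must still be delivered to c1 through the verdict
	 * program: epoll should report EPOLLIN and recv() should return 0.
	 */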
	ev.events = EPOLLIN;
	ev.data.fd = c1;

	epollfd = epoll_create1(0);
	if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
		goto out_close;
	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
	if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
		goto out_close;
	err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
	if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
		goto out_close;

	n = recv(c1, &b, 1, SOCK_NONBLOCK);
	ASSERT_EQ(n, 0, "recv_timeout(fin)");
out_close:
	close(c1);
	close(p1);
out:
	test_sockmap_pass_prog__destroy(skel);
}

static void test_sockmap_skb_verdict_fionread(bool pass_prog)
{
	int expected, zero = 0, sent, recvd, avail;
	int err, map, verdict, s, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
	struct test_sockmap_pass_prog *pass = NULL;
	struct test_sockmap_drop_prog *drop = NULL;
	char buf[256] = "0123456789";

	if (pass_prog) {
		pass = test_sockmap_pass_prog__open_and_load();
		if (!ASSERT_OK_PTR(pass, "open_and_load"))
			return;
		verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
		map = bpf_map__fd(pass->maps.sock_map_rx);
		expected = sizeof(buf);
	} else {
		drop = test_sockmap_drop_prog__open_and_load();
		if (!ASSERT_OK_PTR(drop, "open_and_load"))
			return;
		verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
		map = bpf_map__fd(drop->maps.sock_map_rx);
		/* On drop data is consumed immediately and copied_seq inc'd */
		expected = 0;
	}

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	s = socket_loopback(AF_INET, SOCK_STREAM);
	if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
		goto out;
	err = create_socket_pairs(s, AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
	if (!ASSERT_OK(err, "create_socket_pairs(s)"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, &buf, sizeof(buf), 0);
	ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
	/* On DROP test there will be no data to read */
	if (pass_prog) {
		recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
		ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
	}

out_close:
	close(c0);
	close(p0);
	close(c1);
	close(p1);
out:
	if (pass_prog)
		test_sockmap_pass_prog__destroy(pass);
	else
		test_sockmap_drop_prog__destroy(drop);
}

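/* Send once, then check that MSG_PEEK leaves both the queued data and the
 * FIONREAD count untouched, while a normal read drains them.
 */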
static void test_sockmap_skb_verdict_peek_helper(int map)
{
	int err, s, c1, p1, zero = 0, sent, recvd, avail;
	char snd[256] = "0123456789";
	char rcv[256] = "0";

	s = socket_loopback(AF_INET, SOCK_STREAM);
	if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
		return;

	err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
	if (!ASSERT_OK(err, "create_pairs(s)"))
		return;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, snd, sizeof(snd), 0);
	ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
	recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
	recvd = recv(c1, rcv, sizeof(rcv), 0);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");

out_close:
	close(c1);
	close(p1);
}

static void test_sockmap_skb_verdict_peek(void)
{
	struct test_sockmap_pass_prog *pass;
	int err, map, verdict;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
	map = bpf_map__fd(pass->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);

out:
	test_sockmap_pass_prog__destroy(pass);
}

static void test_sockmap_skb_verdict_peek_with_link(void)
{
	struct test_sockmap_pass_prog *pass;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	prog = pass->progs.prog_skb_verdict;
	map = bpf_map__fd(pass->maps.sock_map_rx);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	err = bpf_link__update_program(link, pass->progs.prog_skb_verdict_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with a different attach type attempts the update. */
	err = bpf_link__update_program(link, pass->progs.prog_skb_parser);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);
	ASSERT_EQ(pass->bss->clone_called, 1, "clone_called");
out:
	bpf_link__detach(link);
	test_sockmap_pass_prog__destroy(pass);
}

static void test_sockmap_unconnected_unix(void)
{
	int err, map, stream = 0, dgram = 0, zero = 0;
	struct test_sockmap_pass_prog *skel;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
	if (stream < 0)
		return;

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		close(stream);
		return;
	}

	err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
	ASSERT_ERR(err, "bpf_map_update_elem(stream)");

	err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
	ASSERT_OK(err, "bpf_map_update_elem(dgram)");

	close(stream);
	close(dgram);
}

/* Insert the same set of sockets into several entries of a single map, then
 * delete all of the entries again.
 */
static void test_sockmap_many_socket(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map, &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

/* Insert the same set of sockets across two maps (rx and tx), then delete the
 * entries from both maps again.
 */
static void test_sockmap_many_maps(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map[2], entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map[0] = bpf_map__fd(skel->maps.sock_map_rx);
	map[1] = bpf_map__fd(skel->maps.sock_map_tx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map[1], &entry);
		entry--;
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
		err = bpf_map_delete_elem(map[0], &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

static void test_sockmap_same_sock(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, zero = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}

	err = bpf_map_delete_elem(map, &zero);
	ASSERT_OK(err, "bpf_map_delete_elem(entry)");

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

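/* Entry point: each scenario is registered with test__start_subtest() so it
 * can be run or skipped individually by the test_progs runner.
 */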
void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict attach")) {
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
	if (test__start_subtest("sockmap skb_verdict attach_with_link"))
		test_sockmap_skb_verdict_attach_with_link();
	if (test__start_subtest("sockmap msg_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
	if (test__start_subtest("sockmap stream_parser progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
	if (test__start_subtest("sockmap stream_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
	if (test__start_subtest("sockmap skb_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
	if (test__start_subtest("sockmap skb_verdict shutdown"))
		test_sockmap_skb_verdict_shutdown();
	if (test__start_subtest("sockmap skb_verdict fionread"))
		test_sockmap_skb_verdict_fionread(true);
	if (test__start_subtest("sockmap skb_verdict fionread on drop"))
		test_sockmap_skb_verdict_fionread(false);
	if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
		test_sockmap_skb_verdict_peek();
	if (test__start_subtest("sockmap skb_verdict msg_f_peek with link"))
		test_sockmap_skb_verdict_peek_with_link();
	if (test__start_subtest("sockmap unconnected af_unix"))
		test_sockmap_unconnected_unix();
	if (test__start_subtest("sockmap one socket to many map entries"))
		test_sockmap_many_socket();
	if (test__start_subtest("sockmap one socket to many maps"))
		test_sockmap_many_maps();
	if (test__start_subtest("sockmap same socket replace"))
		test_sockmap_same_sock();
	if (test__start_subtest("sockmap sk_msg attach sockmap helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg attach sockhash helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH);
}