1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2020 Intel Corporation. */ 3 4 /* 5 * Some functions in this program are taken from 6 * Linux kernel samples/bpf/xdpsock* and modified 7 * for use. 8 * 9 * See test_xsk.sh for detailed information on test topology 10 * and prerequisite network setup. 11 * 12 * This test program contains two threads, each thread is single socket with 13 * a unique UMEM. It validates in-order packet delivery and packet content 14 * by sending packets to each other. 15 * 16 * Tests Information: 17 * ------------------ 18 * These selftests test AF_XDP SKB and Native/DRV modes using veth 19 * Virtual Ethernet interfaces. 20 * 21 * For each mode, the following tests are run: 22 * a. nopoll - soft-irq processing in run-to-completion mode 23 * b. poll - using poll() syscall 24 * c. Socket Teardown 25 * Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy 26 * both sockets, then repeat multiple times. Only nopoll mode is used 27 * d. Bi-directional sockets 28 * Configure sockets as bi-directional tx/rx sockets, sets up fill and 29 * completion rings on each socket, tx/rx in both directions. Only nopoll 30 * mode is used 31 * e. Statistics 32 * Trigger some error conditions and ensure that the appropriate statistics 33 * are incremented. Within this test, the following statistics are tested: 34 * i. rx dropped 35 * Increase the UMEM frame headroom to a value which results in 36 * insufficient space in the rx buffer for both the packet and the headroom. 37 * ii. tx invalid 38 * Set the 'len' field of tx descriptors to an invalid value (umem frame 39 * size + 1). 40 * iii. rx ring full 41 * Reduce the size of the RX ring to a fraction of the fill ring size. 42 * iv. fill queue empty 43 * Do not populate the fill queue and then try to receive pkts. 44 * f. bpf_link resource persistence 45 * Configure sockets at indexes 0 and 1, run a traffic on queue ids 0, 46 * then remove xsk sockets from queue 0 on both veth interfaces and 47 * finally run a traffic on queues ids 1 48 * g. unaligned mode 49 * h. tests for invalid and corner case Tx descriptors so that the correct ones 50 * are discarded and let through, respectively. 51 * i. 2K frame size tests 52 * j. If multi-buffer is supported, send 9k packets divided into 3 frames 53 * k. If multi-buffer and huge pages are supported, send 9k packets in a single frame 54 * using unaligned mode 55 * l. If multi-buffer is supported, try various nasty combinations of descriptors to 56 * check if they pass the validation or not 57 * 58 * Flow: 59 * ----- 60 * - Single process spawns two threads: Tx and Rx 61 * - Each of these two threads attach to a veth interface 62 * - Each thread creates one AF_XDP socket connected to a unique umem for each 63 * veth interface 64 * - Tx thread Transmits a number of packets from veth<xxxx> to veth<yyyy> 65 * - Rx thread verifies if all packets were received and delivered in-order, 66 * and have the right content 67 * 68 * Enable/disable packet dump mode: 69 * -------------------------- 70 * To enable L2 - L4 headers and payload dump of each packet on STDOUT, add 71 * parameter -D to params array in test_xsk.sh, i.e. 
params=("-S" "-D") 72 */ 73 74 #define _GNU_SOURCE 75 #include <assert.h> 76 #include <fcntl.h> 77 #include <errno.h> 78 #include <getopt.h> 79 #include <linux/if_link.h> 80 #include <linux/if_ether.h> 81 #include <linux/mman.h> 82 #include <linux/netdev.h> 83 #include <linux/bitmap.h> 84 #include <linux/ethtool.h> 85 #include <arpa/inet.h> 86 #include <net/if.h> 87 #include <locale.h> 88 #include <poll.h> 89 #include <pthread.h> 90 #include <signal.h> 91 #include <stdio.h> 92 #include <stdlib.h> 93 #include <string.h> 94 #include <stddef.h> 95 #include <sys/mman.h> 96 #include <sys/socket.h> 97 #include <sys/time.h> 98 #include <sys/types.h> 99 #include <unistd.h> 100 101 #include "xsk_xdp_progs.skel.h" 102 #include "xsk.h" 103 #include "xskxceiver.h" 104 #include <bpf/bpf.h> 105 #include <linux/filter.h> 106 #include "../kselftest.h" 107 #include "xsk_xdp_common.h" 108 109 #include <network_helpers.h> 110 111 static bool opt_verbose; 112 static bool opt_print_tests; 113 static enum test_mode opt_mode = TEST_MODE_ALL; 114 static u32 opt_run_test = RUN_ALL_TESTS; 115 116 void test__fail(void) { /* for network_helpers.c */ } 117 118 static void __exit_with_error(int error, const char *file, const char *func, int line) 119 { 120 ksft_test_result_fail("[%s:%s:%i]: ERROR: %d/\"%s\"\n", file, func, line, error, 121 strerror(error)); 122 ksft_exit_xfail(); 123 } 124 125 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) 126 #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "" 127 static char *mode_string(struct test_spec *test) 128 { 129 switch (test->mode) { 130 case TEST_MODE_SKB: 131 return "SKB"; 132 case TEST_MODE_DRV: 133 return "DRV"; 134 case TEST_MODE_ZC: 135 return "ZC"; 136 default: 137 return "BOGUS"; 138 } 139 } 140 141 static void report_failure(struct test_spec *test) 142 { 143 if (test->fail) 144 return; 145 146 ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test), 147 test->name); 148 test->fail = true; 149 } 150 151 /* The payload is a word consisting of a packet sequence number in the upper 152 * 16-bits and a intra packet data sequence number in the lower 16 bits. So the 3rd packet's 153 * 5th word of data will contain the number (2<<16) | 4 as they are numbered from 0. 154 */ 155 static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size) 156 { 157 u32 *ptr = (u32 *)dest, i; 158 159 start /= sizeof(*ptr); 160 size /= sizeof(*ptr); 161 for (i = 0; i < size; i++) 162 ptr[i] = htonl(pkt_nb << 16 | (i + start)); 163 } 164 165 static void gen_eth_hdr(struct xsk_socket_info *xsk, struct ethhdr *eth_hdr) 166 { 167 memcpy(eth_hdr->h_dest, xsk->dst_mac, ETH_ALEN); 168 memcpy(eth_hdr->h_source, xsk->src_mac, ETH_ALEN); 169 eth_hdr->h_proto = htons(ETH_P_LOOPBACK); 170 } 171 172 static bool is_umem_valid(struct ifobject *ifobj) 173 { 174 return !!ifobj->umem->umem; 175 } 176 177 static u32 mode_to_xdp_flags(enum test_mode mode) 178 { 179 return (mode == TEST_MODE_SKB) ? 
XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; 180 } 181 182 static u64 umem_size(struct xsk_umem_info *umem) 183 { 184 return umem->num_frames * umem->frame_size; 185 } 186 187 static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, 188 u64 size) 189 { 190 struct xsk_umem_config cfg = { 191 .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, 192 .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, 193 .frame_size = umem->frame_size, 194 .frame_headroom = umem->frame_headroom, 195 .flags = XSK_UMEM__DEFAULT_FLAGS 196 }; 197 int ret; 198 199 if (umem->fill_size) 200 cfg.fill_size = umem->fill_size; 201 202 if (umem->comp_size) 203 cfg.comp_size = umem->comp_size; 204 205 if (umem->unaligned_mode) 206 cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; 207 208 ret = xsk_umem__create(&umem->umem, buffer, size, 209 &umem->fq, &umem->cq, &cfg); 210 if (ret) 211 return ret; 212 213 umem->buffer = buffer; 214 if (ifobj->shared_umem && ifobj->rx_on) { 215 umem->base_addr = umem_size(umem); 216 umem->next_buffer = umem_size(umem); 217 } 218 219 return 0; 220 } 221 222 static u64 umem_alloc_buffer(struct xsk_umem_info *umem) 223 { 224 u64 addr; 225 226 addr = umem->next_buffer; 227 umem->next_buffer += umem->frame_size; 228 if (umem->next_buffer >= umem->base_addr + umem_size(umem)) 229 umem->next_buffer = umem->base_addr; 230 231 return addr; 232 } 233 234 static void umem_reset_alloc(struct xsk_umem_info *umem) 235 { 236 umem->next_buffer = 0; 237 } 238 239 static void enable_busy_poll(struct xsk_socket_info *xsk) 240 { 241 int sock_opt; 242 243 sock_opt = 1; 244 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL, 245 (void *)&sock_opt, sizeof(sock_opt)) < 0) 246 exit_with_error(errno); 247 248 sock_opt = 20; 249 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL, 250 (void *)&sock_opt, sizeof(sock_opt)) < 0) 251 exit_with_error(errno); 252 253 sock_opt = xsk->batch_size; 254 if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET, 255 (void *)&sock_opt, sizeof(sock_opt)) < 0) 256 exit_with_error(errno); 257 } 258 259 static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, 260 struct ifobject *ifobject, bool shared) 261 { 262 struct xsk_socket_config cfg = {}; 263 struct xsk_ring_cons *rxr; 264 struct xsk_ring_prod *txr; 265 266 xsk->umem = umem; 267 cfg.rx_size = xsk->rxqsize; 268 cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; 269 cfg.bind_flags = ifobject->bind_flags; 270 if (shared) 271 cfg.bind_flags |= XDP_SHARED_UMEM; 272 if (ifobject->mtu > MAX_ETH_PKT_SIZE) 273 cfg.bind_flags |= XDP_USE_SG; 274 if (umem->comp_size) 275 cfg.tx_size = umem->comp_size; 276 if (umem->fill_size) 277 cfg.rx_size = umem->fill_size; 278 279 txr = ifobject->tx_on ? &xsk->tx : NULL; 280 rxr = ifobject->rx_on ? 
&xsk->rx : NULL; 281 return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg); 282 } 283 284 static bool ifobj_zc_avail(struct ifobject *ifobject) 285 { 286 size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; 287 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 288 struct xsk_socket_info *xsk; 289 struct xsk_umem_info *umem; 290 bool zc_avail = false; 291 void *bufs; 292 int ret; 293 294 bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 295 if (bufs == MAP_FAILED) 296 exit_with_error(errno); 297 298 umem = calloc(1, sizeof(struct xsk_umem_info)); 299 if (!umem) { 300 munmap(bufs, umem_sz); 301 exit_with_error(ENOMEM); 302 } 303 umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 304 ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz); 305 if (ret) 306 exit_with_error(-ret); 307 308 xsk = calloc(1, sizeof(struct xsk_socket_info)); 309 if (!xsk) 310 goto out; 311 ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY; 312 ifobject->rx_on = true; 313 xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 314 ret = __xsk_configure_socket(xsk, umem, ifobject, false); 315 if (!ret) 316 zc_avail = true; 317 318 xsk_socket__delete(xsk->xsk); 319 free(xsk); 320 out: 321 munmap(umem->buffer, umem_sz); 322 xsk_umem__delete(umem->umem); 323 free(umem); 324 return zc_avail; 325 } 326 327 #define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags" 328 static unsigned int get_max_skb_frags(void) 329 { 330 unsigned int max_skb_frags = 0; 331 FILE *file; 332 333 file = fopen(MAX_SKB_FRAGS_PATH, "r"); 334 if (!file) { 335 ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH); 336 return 0; 337 } 338 339 if (fscanf(file, "%u", &max_skb_frags) != 1) 340 ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH); 341 342 fclose(file); 343 return max_skb_frags; 344 } 345 346 static struct option long_options[] = { 347 {"interface", required_argument, 0, 'i'}, 348 {"busy-poll", no_argument, 0, 'b'}, 349 {"verbose", no_argument, 0, 'v'}, 350 {"mode", required_argument, 0, 'm'}, 351 {"list", no_argument, 0, 'l'}, 352 {"test", required_argument, 0, 't'}, 353 {"help", no_argument, 0, 'h'}, 354 {0, 0, 0, 0} 355 }; 356 357 static void print_usage(char **argv) 358 { 359 const char *str = 360 " Usage: xskxceiver [OPTIONS]\n" 361 " Options:\n" 362 " -i, --interface Use interface\n" 363 " -v, --verbose Verbose output\n" 364 " -b, --busy-poll Enable busy poll\n" 365 " -m, --mode Run only mode skb, drv, or zc\n" 366 " -l, --list List all available tests\n" 367 " -t, --test Run a specific test. 
Enter number from -l option.\n" 368 " -h, --help Display this help and exit\n"; 369 370 ksft_print_msg(str, basename(argv[0])); 371 ksft_exit_xfail(); 372 } 373 374 static bool validate_interface(struct ifobject *ifobj) 375 { 376 if (!strcmp(ifobj->ifname, "")) 377 return false; 378 return true; 379 } 380 381 static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc, 382 char **argv) 383 { 384 struct ifobject *ifobj; 385 u32 interface_nb = 0; 386 int option_index, c; 387 388 opterr = 0; 389 390 for (;;) { 391 c = getopt_long(argc, argv, "i:vbm:lt:", long_options, &option_index); 392 if (c == -1) 393 break; 394 395 switch (c) { 396 case 'i': 397 if (interface_nb == 0) 398 ifobj = ifobj_tx; 399 else if (interface_nb == 1) 400 ifobj = ifobj_rx; 401 else 402 break; 403 404 memcpy(ifobj->ifname, optarg, 405 min_t(size_t, MAX_INTERFACE_NAME_CHARS, strlen(optarg))); 406 407 ifobj->ifindex = if_nametoindex(ifobj->ifname); 408 if (!ifobj->ifindex) 409 exit_with_error(errno); 410 411 interface_nb++; 412 break; 413 case 'v': 414 opt_verbose = true; 415 break; 416 case 'b': 417 ifobj_tx->busy_poll = true; 418 ifobj_rx->busy_poll = true; 419 break; 420 case 'm': 421 if (!strncmp("skb", optarg, strlen(optarg))) 422 opt_mode = TEST_MODE_SKB; 423 else if (!strncmp("drv", optarg, strlen(optarg))) 424 opt_mode = TEST_MODE_DRV; 425 else if (!strncmp("zc", optarg, strlen(optarg))) 426 opt_mode = TEST_MODE_ZC; 427 else 428 print_usage(argv); 429 break; 430 case 'l': 431 opt_print_tests = true; 432 break; 433 case 't': 434 errno = 0; 435 opt_run_test = strtol(optarg, NULL, 0); 436 if (errno) 437 print_usage(argv); 438 break; 439 case 'h': 440 default: 441 print_usage(argv); 442 } 443 } 444 } 445 446 static int set_ring_size(struct ifobject *ifobj) 447 { 448 int ret; 449 u32 ctr = 0; 450 451 while (ctr++ < SOCK_RECONF_CTR) { 452 ret = set_hw_ring_size(ifobj->ifname, &ifobj->ring); 453 if (!ret) 454 break; 455 456 /* Retry if it fails */ 457 if (ctr >= SOCK_RECONF_CTR || errno != EBUSY) 458 return -errno; 459 460 usleep(USLEEP_MAX); 461 } 462 463 return ret; 464 } 465 466 static int hw_ring_size_reset(struct ifobject *ifobj) 467 { 468 ifobj->ring.tx_pending = ifobj->set_ring.default_tx; 469 ifobj->ring.rx_pending = ifobj->set_ring.default_rx; 470 return set_ring_size(ifobj); 471 } 472 473 static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 474 struct ifobject *ifobj_rx) 475 { 476 u32 i, j; 477 478 for (i = 0; i < MAX_INTERFACES; i++) { 479 struct ifobject *ifobj = i ? 
ifobj_rx : ifobj_tx; 480 481 ifobj->xsk = &ifobj->xsk_arr[0]; 482 ifobj->use_poll = false; 483 ifobj->use_fill_ring = true; 484 ifobj->release_rx = true; 485 ifobj->validation_func = NULL; 486 ifobj->use_metadata = false; 487 488 if (i == 0) { 489 ifobj->rx_on = false; 490 ifobj->tx_on = true; 491 } else { 492 ifobj->rx_on = true; 493 ifobj->tx_on = false; 494 } 495 496 memset(ifobj->umem, 0, sizeof(*ifobj->umem)); 497 ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; 498 ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 499 500 for (j = 0; j < MAX_SOCKETS; j++) { 501 memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); 502 ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 503 ifobj->xsk_arr[j].batch_size = DEFAULT_BATCH_SIZE; 504 if (i == 0) 505 ifobj->xsk_arr[j].pkt_stream = test->tx_pkt_stream_default; 506 else 507 ifobj->xsk_arr[j].pkt_stream = test->rx_pkt_stream_default; 508 509 memcpy(ifobj->xsk_arr[j].src_mac, g_mac, ETH_ALEN); 510 memcpy(ifobj->xsk_arr[j].dst_mac, g_mac, ETH_ALEN); 511 ifobj->xsk_arr[j].src_mac[5] += ((j * 2) + 0); 512 ifobj->xsk_arr[j].dst_mac[5] += ((j * 2) + 1); 513 } 514 } 515 516 if (ifobj_tx->hw_ring_size_supp) 517 hw_ring_size_reset(ifobj_tx); 518 519 test->ifobj_tx = ifobj_tx; 520 test->ifobj_rx = ifobj_rx; 521 test->current_step = 0; 522 test->total_steps = 1; 523 test->nb_sockets = 1; 524 test->fail = false; 525 test->set_ring = false; 526 test->mtu = MAX_ETH_PKT_SIZE; 527 test->xdp_prog_rx = ifobj_rx->xdp_progs->progs.xsk_def_prog; 528 test->xskmap_rx = ifobj_rx->xdp_progs->maps.xsk; 529 test->xdp_prog_tx = ifobj_tx->xdp_progs->progs.xsk_def_prog; 530 test->xskmap_tx = ifobj_tx->xdp_progs->maps.xsk; 531 } 532 533 static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 534 struct ifobject *ifobj_rx, enum test_mode mode, 535 const struct test_spec *test_to_run) 536 { 537 struct pkt_stream *tx_pkt_stream; 538 struct pkt_stream *rx_pkt_stream; 539 u32 i; 540 541 tx_pkt_stream = test->tx_pkt_stream_default; 542 rx_pkt_stream = test->rx_pkt_stream_default; 543 memset(test, 0, sizeof(*test)); 544 test->tx_pkt_stream_default = tx_pkt_stream; 545 test->rx_pkt_stream_default = rx_pkt_stream; 546 547 for (i = 0; i < MAX_INTERFACES; i++) { 548 struct ifobject *ifobj = i ? 
ifobj_rx : ifobj_tx; 549 550 ifobj->bind_flags = XDP_USE_NEED_WAKEUP; 551 if (mode == TEST_MODE_ZC) 552 ifobj->bind_flags |= XDP_ZEROCOPY; 553 else 554 ifobj->bind_flags |= XDP_COPY; 555 } 556 557 strncpy(test->name, test_to_run->name, MAX_TEST_NAME_SIZE); 558 test->test_func = test_to_run->test_func; 559 test->mode = mode; 560 __test_spec_init(test, ifobj_tx, ifobj_rx); 561 } 562 563 static void test_spec_reset(struct test_spec *test) 564 { 565 __test_spec_init(test, test->ifobj_tx, test->ifobj_rx); 566 } 567 568 static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *xdp_prog_rx, 569 struct bpf_program *xdp_prog_tx, struct bpf_map *xskmap_rx, 570 struct bpf_map *xskmap_tx) 571 { 572 test->xdp_prog_rx = xdp_prog_rx; 573 test->xdp_prog_tx = xdp_prog_tx; 574 test->xskmap_rx = xskmap_rx; 575 test->xskmap_tx = xskmap_tx; 576 } 577 578 static int test_spec_set_mtu(struct test_spec *test, int mtu) 579 { 580 int err; 581 582 if (test->ifobj_rx->mtu != mtu) { 583 err = xsk_set_mtu(test->ifobj_rx->ifindex, mtu); 584 if (err) 585 return err; 586 test->ifobj_rx->mtu = mtu; 587 } 588 if (test->ifobj_tx->mtu != mtu) { 589 err = xsk_set_mtu(test->ifobj_tx->ifindex, mtu); 590 if (err) 591 return err; 592 test->ifobj_tx->mtu = mtu; 593 } 594 595 return 0; 596 } 597 598 static void pkt_stream_reset(struct pkt_stream *pkt_stream) 599 { 600 if (pkt_stream) { 601 pkt_stream->current_pkt_nb = 0; 602 pkt_stream->nb_rx_pkts = 0; 603 } 604 } 605 606 static struct pkt *pkt_stream_get_next_tx_pkt(struct pkt_stream *pkt_stream) 607 { 608 if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) 609 return NULL; 610 611 return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; 612 } 613 614 static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent) 615 { 616 while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { 617 (*pkts_sent)++; 618 if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid) 619 return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; 620 pkt_stream->current_pkt_nb++; 621 } 622 return NULL; 623 } 624 625 static void pkt_stream_delete(struct pkt_stream *pkt_stream) 626 { 627 free(pkt_stream->pkts); 628 free(pkt_stream); 629 } 630 631 static void pkt_stream_restore_default(struct test_spec *test) 632 { 633 struct pkt_stream *tx_pkt_stream = test->ifobj_tx->xsk->pkt_stream; 634 struct pkt_stream *rx_pkt_stream = test->ifobj_rx->xsk->pkt_stream; 635 636 if (tx_pkt_stream != test->tx_pkt_stream_default) { 637 pkt_stream_delete(test->ifobj_tx->xsk->pkt_stream); 638 test->ifobj_tx->xsk->pkt_stream = test->tx_pkt_stream_default; 639 } 640 641 if (rx_pkt_stream != test->rx_pkt_stream_default) { 642 pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream); 643 test->ifobj_rx->xsk->pkt_stream = test->rx_pkt_stream_default; 644 } 645 } 646 647 static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) 648 { 649 struct pkt_stream *pkt_stream; 650 651 pkt_stream = calloc(1, sizeof(*pkt_stream)); 652 if (!pkt_stream) 653 return NULL; 654 655 pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); 656 if (!pkt_stream->pkts) { 657 free(pkt_stream); 658 return NULL; 659 } 660 661 pkt_stream->nb_pkts = nb_pkts; 662 return pkt_stream; 663 } 664 665 static bool pkt_continues(u32 options) 666 { 667 return options & XDP_PKT_CONTD; 668 } 669 670 static u32 ceil_u32(u32 a, u32 b) 671 { 672 return (a + b - 1) / b; 673 } 674 675 static u32 pkt_nb_frags(u32 frame_size, struct pkt_stream *pkt_stream, struct pkt *pkt) 676 { 677 u32 nb_frags = 1, next_frag; 678 679 if (!pkt) 
680 return 1; 681 682 if (!pkt_stream->verbatim) { 683 if (!pkt->valid || !pkt->len) 684 return 1; 685 return ceil_u32(pkt->len, frame_size); 686 } 687 688 /* Search for the end of the packet in verbatim mode */ 689 if (!pkt_continues(pkt->options)) 690 return nb_frags; 691 692 next_frag = pkt_stream->current_pkt_nb; 693 pkt++; 694 while (next_frag++ < pkt_stream->nb_pkts) { 695 nb_frags++; 696 if (!pkt_continues(pkt->options) || !pkt->valid) 697 break; 698 pkt++; 699 } 700 return nb_frags; 701 } 702 703 static bool set_pkt_valid(int offset, u32 len) 704 { 705 return len <= MAX_ETH_JUMBO_SIZE; 706 } 707 708 static void pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len) 709 { 710 pkt->offset = offset; 711 pkt->len = len; 712 pkt->valid = set_pkt_valid(offset, len); 713 } 714 715 static void pkt_stream_pkt_set(struct pkt_stream *pkt_stream, struct pkt *pkt, int offset, u32 len) 716 { 717 bool prev_pkt_valid = pkt->valid; 718 719 pkt_set(pkt_stream, pkt, offset, len); 720 pkt_stream->nb_valid_entries += pkt->valid - prev_pkt_valid; 721 } 722 723 static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len) 724 { 725 return ceil_u32(len, umem->frame_size) * umem->frame_size; 726 } 727 728 static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb_start, u32 nb_off) 729 { 730 struct pkt_stream *pkt_stream; 731 u32 i; 732 733 pkt_stream = __pkt_stream_alloc(nb_pkts); 734 if (!pkt_stream) 735 exit_with_error(ENOMEM); 736 737 pkt_stream->nb_pkts = nb_pkts; 738 pkt_stream->max_pkt_len = pkt_len; 739 for (i = 0; i < nb_pkts; i++) { 740 struct pkt *pkt = &pkt_stream->pkts[i]; 741 742 pkt_stream_pkt_set(pkt_stream, pkt, 0, pkt_len); 743 pkt->pkt_nb = nb_start + i * nb_off; 744 } 745 746 return pkt_stream; 747 } 748 749 static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len) 750 { 751 return __pkt_stream_generate(nb_pkts, pkt_len, 0, 1); 752 } 753 754 static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream) 755 { 756 return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len); 757 } 758 759 static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len) 760 { 761 struct pkt_stream *pkt_stream; 762 763 pkt_stream = pkt_stream_generate(nb_pkts, pkt_len); 764 test->ifobj_tx->xsk->pkt_stream = pkt_stream; 765 pkt_stream = pkt_stream_generate(nb_pkts, pkt_len); 766 test->ifobj_rx->xsk->pkt_stream = pkt_stream; 767 } 768 769 static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, 770 int offset) 771 { 772 struct pkt_stream *pkt_stream; 773 u32 i; 774 775 pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream); 776 for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2) 777 pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len); 778 779 ifobj->xsk->pkt_stream = pkt_stream; 780 } 781 782 static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) 783 { 784 __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset); 785 __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset); 786 } 787 788 static void pkt_stream_receive_half(struct test_spec *test) 789 { 790 struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream; 791 u32 i; 792 793 test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts, 794 pkt_stream->pkts[0].len); 795 pkt_stream = test->ifobj_rx->xsk->pkt_stream; 796 for (i = 1; i < pkt_stream->nb_pkts; i += 2) 797 pkt_stream->pkts[i].valid = false; 798 799 pkt_stream->nb_valid_entries /= 2; 800 } 801 802 
static void pkt_stream_even_odd_sequence(struct test_spec *test) 803 { 804 struct pkt_stream *pkt_stream; 805 u32 i; 806 807 for (i = 0; i < test->nb_sockets; i++) { 808 pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream; 809 pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2, 810 pkt_stream->pkts[0].len, i, 2); 811 test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream; 812 813 pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream; 814 pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2, 815 pkt_stream->pkts[0].len, i, 2); 816 test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream; 817 } 818 } 819 820 static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem) 821 { 822 if (!pkt->valid) 823 return pkt->offset; 824 return pkt->offset + umem_alloc_buffer(umem); 825 } 826 827 static void pkt_stream_cancel(struct pkt_stream *pkt_stream) 828 { 829 pkt_stream->current_pkt_nb--; 830 } 831 832 static void pkt_generate(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, u64 addr, u32 len, 833 u32 pkt_nb, u32 bytes_written) 834 { 835 void *data = xsk_umem__get_data(umem->buffer, addr); 836 837 if (len < MIN_PKT_SIZE) 838 return; 839 840 if (!bytes_written) { 841 gen_eth_hdr(xsk, data); 842 843 len -= PKT_HDR_SIZE; 844 data += PKT_HDR_SIZE; 845 } else { 846 bytes_written -= PKT_HDR_SIZE; 847 } 848 849 write_payload(data, pkt_nb, bytes_written, len); 850 } 851 852 static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, struct pkt *frames, 853 u32 nb_frames, bool verbatim) 854 { 855 u32 i, len = 0, pkt_nb = 0, payload = 0; 856 struct pkt_stream *pkt_stream; 857 858 pkt_stream = __pkt_stream_alloc(nb_frames); 859 if (!pkt_stream) 860 exit_with_error(ENOMEM); 861 862 for (i = 0; i < nb_frames; i++) { 863 struct pkt *pkt = &pkt_stream->pkts[pkt_nb]; 864 struct pkt *frame = &frames[i]; 865 866 pkt->offset = frame->offset; 867 if (verbatim) { 868 *pkt = *frame; 869 pkt->pkt_nb = payload; 870 if (!frame->valid || !pkt_continues(frame->options)) 871 payload++; 872 } else { 873 if (frame->valid) 874 len += frame->len; 875 if (frame->valid && pkt_continues(frame->options)) 876 continue; 877 878 pkt->pkt_nb = pkt_nb; 879 pkt->len = len; 880 pkt->valid = frame->valid; 881 pkt->options = 0; 882 883 len = 0; 884 } 885 886 print_verbose("offset: %d len: %u valid: %u options: %u pkt_nb: %u\n", 887 pkt->offset, pkt->len, pkt->valid, pkt->options, pkt->pkt_nb); 888 889 if (pkt->valid && pkt->len > pkt_stream->max_pkt_len) 890 pkt_stream->max_pkt_len = pkt->len; 891 892 if (pkt->valid) 893 pkt_stream->nb_valid_entries++; 894 895 pkt_nb++; 896 } 897 898 pkt_stream->nb_pkts = pkt_nb; 899 pkt_stream->verbatim = verbatim; 900 return pkt_stream; 901 } 902 903 static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) 904 { 905 struct pkt_stream *pkt_stream; 906 907 pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true); 908 test->ifobj_tx->xsk->pkt_stream = pkt_stream; 909 910 pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false); 911 test->ifobj_rx->xsk->pkt_stream = pkt_stream; 912 } 913 914 static void pkt_print_data(u32 *data, u32 cnt) 915 { 916 u32 i; 917 918 for (i = 0; i < cnt; i++) { 919 u32 seqnum, pkt_nb; 920 921 seqnum = ntohl(*data) & 0xffff; 922 pkt_nb = ntohl(*data) >> 16; 923 ksft_print_msg("%u:%u ", pkt_nb, seqnum); 924 data++; 925 } 926 } 927 928 static void pkt_dump(void *pkt, u32 len, bool eth_header) 929 { 930 struct ethhdr *ethhdr = pkt; 931 u32 i, *data; 932 933 if (eth_header) 
{ 934 /*extract L2 frame */ 935 ksft_print_msg("DEBUG>> L2: dst mac: "); 936 for (i = 0; i < ETH_ALEN; i++) 937 ksft_print_msg("%02X", ethhdr->h_dest[i]); 938 939 ksft_print_msg("\nDEBUG>> L2: src mac: "); 940 for (i = 0; i < ETH_ALEN; i++) 941 ksft_print_msg("%02X", ethhdr->h_source[i]); 942 943 data = pkt + PKT_HDR_SIZE; 944 } else { 945 data = pkt; 946 } 947 948 /*extract L5 frame */ 949 ksft_print_msg("\nDEBUG>> L5: seqnum: "); 950 pkt_print_data(data, PKT_DUMP_NB_TO_PRINT); 951 ksft_print_msg("...."); 952 if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) { 953 ksft_print_msg("\n.... "); 954 pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT, 955 PKT_DUMP_NB_TO_PRINT); 956 } 957 ksft_print_msg("\n---------------------------------------\n"); 958 } 959 960 static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr) 961 { 962 u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom; 963 u32 offset = addr % umem->frame_size, expected_offset; 964 int pkt_offset = pkt->valid ? pkt->offset : 0; 965 966 if (!umem->unaligned_mode) 967 pkt_offset = 0; 968 969 expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; 970 971 if (offset == expected_offset) 972 return true; 973 974 ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset); 975 return false; 976 } 977 978 static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) 979 { 980 void *data = xsk_umem__get_data(buffer, addr); 981 struct xdp_info *meta = data - sizeof(struct xdp_info); 982 983 if (meta->count != pkt->pkt_nb) { 984 ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%llu]\n", 985 __func__, pkt->pkt_nb, 986 (unsigned long long)meta->count); 987 return false; 988 } 989 990 return true; 991 } 992 993 static bool is_frag_valid(struct xsk_umem_info *umem, u64 addr, u32 len, u32 expected_pkt_nb, 994 u32 bytes_processed) 995 { 996 u32 seqnum, pkt_nb, *pkt_data, words_to_end, expected_seqnum; 997 void *data = xsk_umem__get_data(umem->buffer, addr); 998 999 addr -= umem->base_addr; 1000 1001 if (addr >= umem->num_frames * umem->frame_size || 1002 addr + len > umem->num_frames * umem->frame_size) { 1003 ksft_print_msg("Frag invalid addr: %llx len: %u\n", 1004 (unsigned long long)addr, len); 1005 return false; 1006 } 1007 if (!umem->unaligned_mode && addr % umem->frame_size + len > umem->frame_size) { 1008 ksft_print_msg("Frag crosses frame boundary addr: %llx len: %u\n", 1009 (unsigned long long)addr, len); 1010 return false; 1011 } 1012 1013 pkt_data = data; 1014 if (!bytes_processed) { 1015 pkt_data += PKT_HDR_SIZE / sizeof(*pkt_data); 1016 len -= PKT_HDR_SIZE; 1017 } else { 1018 bytes_processed -= PKT_HDR_SIZE; 1019 } 1020 1021 expected_seqnum = bytes_processed / sizeof(*pkt_data); 1022 seqnum = ntohl(*pkt_data) & 0xffff; 1023 pkt_nb = ntohl(*pkt_data) >> 16; 1024 1025 if (expected_pkt_nb != pkt_nb) { 1026 ksft_print_msg("[%s] expected pkt_nb [%u], got pkt_nb [%u]\n", 1027 __func__, expected_pkt_nb, pkt_nb); 1028 goto error; 1029 } 1030 if (expected_seqnum != seqnum) { 1031 ksft_print_msg("[%s] expected seqnum at start [%u], got seqnum [%u]\n", 1032 __func__, expected_seqnum, seqnum); 1033 goto error; 1034 } 1035 1036 words_to_end = len / sizeof(*pkt_data) - 1; 1037 pkt_data += words_to_end; 1038 seqnum = ntohl(*pkt_data) & 0xffff; 1039 expected_seqnum += words_to_end; 1040 if (expected_seqnum != seqnum) { 1041 ksft_print_msg("[%s] expected seqnum at end [%u], got seqnum [%u]\n", 1042 __func__, expected_seqnum, 
seqnum); 1043 goto error; 1044 } 1045 1046 return true; 1047 1048 error: 1049 pkt_dump(data, len, !bytes_processed); 1050 return false; 1051 } 1052 1053 static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) 1054 { 1055 if (pkt->len != len) { 1056 ksft_print_msg("[%s] expected packet length [%d], got length [%d]\n", 1057 __func__, pkt->len, len); 1058 pkt_dump(xsk_umem__get_data(buffer, addr), len, true); 1059 return false; 1060 } 1061 1062 return true; 1063 } 1064 1065 static int kick_tx(struct xsk_socket_info *xsk) 1066 { 1067 int ret; 1068 1069 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); 1070 if (ret >= 0) 1071 return TEST_PASS; 1072 if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) { 1073 usleep(100); 1074 return TEST_PASS; 1075 } 1076 return TEST_FAILURE; 1077 } 1078 1079 static int kick_rx(struct xsk_socket_info *xsk) 1080 { 1081 int ret; 1082 1083 ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL); 1084 if (ret < 0) 1085 return TEST_FAILURE; 1086 1087 return TEST_PASS; 1088 } 1089 1090 static int complete_pkts(struct xsk_socket_info *xsk, int batch_size) 1091 { 1092 unsigned int rcvd; 1093 u32 idx; 1094 int ret; 1095 1096 if (xsk_ring_prod__needs_wakeup(&xsk->tx)) { 1097 ret = kick_tx(xsk); 1098 if (ret) 1099 return TEST_FAILURE; 1100 } 1101 1102 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx); 1103 if (rcvd) { 1104 if (rcvd > xsk->outstanding_tx) { 1105 u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1); 1106 1107 ksft_print_msg("[%s] Too many packets completed\n", __func__); 1108 ksft_print_msg("Last completion address: %llx\n", 1109 (unsigned long long)addr); 1110 return TEST_FAILURE; 1111 } 1112 1113 xsk_ring_cons__release(&xsk->umem->cq, rcvd); 1114 xsk->outstanding_tx -= rcvd; 1115 } 1116 1117 return TEST_PASS; 1118 } 1119 1120 static int __receive_pkts(struct test_spec *test, struct xsk_socket_info *xsk) 1121 { 1122 u32 frags_processed = 0, nb_frags = 0, pkt_len = 0; 1123 u32 idx_rx = 0, idx_fq = 0, rcvd, pkts_sent = 0; 1124 struct pkt_stream *pkt_stream = xsk->pkt_stream; 1125 struct ifobject *ifobj = test->ifobj_rx; 1126 struct xsk_umem_info *umem = xsk->umem; 1127 struct pollfd fds = { }; 1128 struct pkt *pkt; 1129 u64 first_addr = 0; 1130 int ret; 1131 1132 fds.fd = xsk_socket__fd(xsk->xsk); 1133 fds.events = POLLIN; 1134 1135 ret = kick_rx(xsk); 1136 if (ret) 1137 return TEST_FAILURE; 1138 1139 if (ifobj->use_poll) { 1140 ret = poll(&fds, 1, POLL_TMOUT); 1141 if (ret < 0) 1142 return TEST_FAILURE; 1143 1144 if (!ret) { 1145 if (!is_umem_valid(test->ifobj_tx)) 1146 return TEST_PASS; 1147 1148 ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__); 1149 return TEST_CONTINUE; 1150 } 1151 1152 if (!(fds.revents & POLLIN)) 1153 return TEST_CONTINUE; 1154 } 1155 1156 rcvd = xsk_ring_cons__peek(&xsk->rx, xsk->batch_size, &idx_rx); 1157 if (!rcvd) 1158 return TEST_CONTINUE; 1159 1160 if (ifobj->use_fill_ring) { 1161 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 1162 while (ret != rcvd) { 1163 if (xsk_ring_prod__needs_wakeup(&umem->fq)) { 1164 ret = poll(&fds, 1, POLL_TMOUT); 1165 if (ret < 0) 1166 return TEST_FAILURE; 1167 } 1168 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 1169 } 1170 } 1171 1172 while (frags_processed < rcvd) { 1173 const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); 1174 u64 addr = desc->addr, orig; 1175 1176 orig = xsk_umem__extract_addr(addr); 1177 addr = 
xsk_umem__add_offset_to_addr(addr); 1178 1179 if (!nb_frags) { 1180 pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent); 1181 if (!pkt) { 1182 ksft_print_msg("[%s] received too many packets addr: %lx len %u\n", 1183 __func__, addr, desc->len); 1184 return TEST_FAILURE; 1185 } 1186 } 1187 1188 print_verbose("Rx: addr: %lx len: %u options: %u pkt_nb: %u valid: %u\n", 1189 addr, desc->len, desc->options, pkt->pkt_nb, pkt->valid); 1190 1191 if (!is_frag_valid(umem, addr, desc->len, pkt->pkt_nb, pkt_len) || 1192 !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata && 1193 !is_metadata_correct(pkt, umem->buffer, addr))) 1194 return TEST_FAILURE; 1195 1196 if (!nb_frags++) 1197 first_addr = addr; 1198 frags_processed++; 1199 pkt_len += desc->len; 1200 if (ifobj->use_fill_ring) 1201 *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig; 1202 1203 if (pkt_continues(desc->options)) 1204 continue; 1205 1206 /* The complete packet has been received */ 1207 if (!is_pkt_valid(pkt, umem->buffer, first_addr, pkt_len) || 1208 !is_offset_correct(umem, pkt, addr)) 1209 return TEST_FAILURE; 1210 1211 pkt_stream->nb_rx_pkts++; 1212 nb_frags = 0; 1213 pkt_len = 0; 1214 } 1215 1216 if (nb_frags) { 1217 /* In the middle of a packet. Start over from beginning of packet. */ 1218 idx_rx -= nb_frags; 1219 xsk_ring_cons__cancel(&xsk->rx, nb_frags); 1220 if (ifobj->use_fill_ring) { 1221 idx_fq -= nb_frags; 1222 xsk_ring_prod__cancel(&umem->fq, nb_frags); 1223 } 1224 frags_processed -= nb_frags; 1225 } 1226 1227 if (ifobj->use_fill_ring) 1228 xsk_ring_prod__submit(&umem->fq, frags_processed); 1229 if (ifobj->release_rx) 1230 xsk_ring_cons__release(&xsk->rx, frags_processed); 1231 1232 pthread_mutex_lock(&pacing_mutex); 1233 pkts_in_flight -= pkts_sent; 1234 pthread_mutex_unlock(&pacing_mutex); 1235 pkts_sent = 0; 1236 1237 return TEST_CONTINUE; 1238 } 1239 1240 bool all_packets_received(struct test_spec *test, struct xsk_socket_info *xsk, u32 sock_num, 1241 unsigned long *bitmap) 1242 { 1243 struct pkt_stream *pkt_stream = xsk->pkt_stream; 1244 1245 if (!pkt_stream) { 1246 __set_bit(sock_num, bitmap); 1247 return false; 1248 } 1249 1250 if (pkt_stream->nb_rx_pkts == pkt_stream->nb_valid_entries) { 1251 __set_bit(sock_num, bitmap); 1252 if (bitmap_full(bitmap, test->nb_sockets)) 1253 return true; 1254 } 1255 1256 return false; 1257 } 1258 1259 static int receive_pkts(struct test_spec *test) 1260 { 1261 struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0}; 1262 DECLARE_BITMAP(bitmap, test->nb_sockets); 1263 struct xsk_socket_info *xsk; 1264 u32 sock_num = 0; 1265 int res, ret; 1266 1267 ret = gettimeofday(&tv_now, NULL); 1268 if (ret) 1269 exit_with_error(errno); 1270 1271 timeradd(&tv_now, &tv_timeout, &tv_end); 1272 1273 while (1) { 1274 xsk = &test->ifobj_rx->xsk_arr[sock_num]; 1275 1276 if ((all_packets_received(test, xsk, sock_num, bitmap))) 1277 break; 1278 1279 res = __receive_pkts(test, xsk); 1280 if (!(res == TEST_PASS || res == TEST_CONTINUE)) 1281 return res; 1282 1283 ret = gettimeofday(&tv_now, NULL); 1284 if (ret) 1285 exit_with_error(errno); 1286 1287 if (timercmp(&tv_now, &tv_end, >)) { 1288 ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__); 1289 return TEST_FAILURE; 1290 } 1291 sock_num = (sock_num + 1) % test->nb_sockets; 1292 } 1293 1294 return TEST_PASS; 1295 } 1296 1297 static int __send_pkts(struct ifobject *ifobject, struct xsk_socket_info *xsk, bool timeout) 1298 { 1299 u32 i, idx = 0, valid_pkts = 0, valid_frags = 0, buffer_len; 1300 struct pkt_stream 
*pkt_stream = xsk->pkt_stream; 1301 struct xsk_umem_info *umem = ifobject->umem; 1302 bool use_poll = ifobject->use_poll; 1303 struct pollfd fds = { }; 1304 int ret; 1305 1306 buffer_len = pkt_get_buffer_len(umem, pkt_stream->max_pkt_len); 1307 /* pkts_in_flight might be negative if many invalid packets are sent */ 1308 if (pkts_in_flight >= (int)((umem_size(umem) - xsk->batch_size * buffer_len) / 1309 buffer_len)) { 1310 ret = kick_tx(xsk); 1311 if (ret) 1312 return TEST_FAILURE; 1313 return TEST_CONTINUE; 1314 } 1315 1316 fds.fd = xsk_socket__fd(xsk->xsk); 1317 fds.events = POLLOUT; 1318 1319 while (xsk_ring_prod__reserve(&xsk->tx, xsk->batch_size, &idx) < xsk->batch_size) { 1320 if (use_poll) { 1321 ret = poll(&fds, 1, POLL_TMOUT); 1322 if (timeout) { 1323 if (ret < 0) { 1324 ksft_print_msg("ERROR: [%s] Poll error %d\n", 1325 __func__, errno); 1326 return TEST_FAILURE; 1327 } 1328 if (ret == 0) 1329 return TEST_PASS; 1330 break; 1331 } 1332 if (ret <= 0) { 1333 ksft_print_msg("ERROR: [%s] Poll error %d\n", 1334 __func__, errno); 1335 return TEST_FAILURE; 1336 } 1337 } 1338 1339 complete_pkts(xsk, xsk->batch_size); 1340 } 1341 1342 for (i = 0; i < xsk->batch_size; i++) { 1343 struct pkt *pkt = pkt_stream_get_next_tx_pkt(pkt_stream); 1344 u32 nb_frags_left, nb_frags, bytes_written = 0; 1345 1346 if (!pkt) 1347 break; 1348 1349 nb_frags = pkt_nb_frags(umem->frame_size, pkt_stream, pkt); 1350 if (nb_frags > xsk->batch_size - i) { 1351 pkt_stream_cancel(pkt_stream); 1352 xsk_ring_prod__cancel(&xsk->tx, xsk->batch_size - i); 1353 break; 1354 } 1355 nb_frags_left = nb_frags; 1356 1357 while (nb_frags_left--) { 1358 struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); 1359 1360 tx_desc->addr = pkt_get_addr(pkt, ifobject->umem); 1361 if (pkt_stream->verbatim) { 1362 tx_desc->len = pkt->len; 1363 tx_desc->options = pkt->options; 1364 } else if (nb_frags_left) { 1365 tx_desc->len = umem->frame_size; 1366 tx_desc->options = XDP_PKT_CONTD; 1367 } else { 1368 tx_desc->len = pkt->len - bytes_written; 1369 tx_desc->options = 0; 1370 } 1371 if (pkt->valid) 1372 pkt_generate(xsk, umem, tx_desc->addr, tx_desc->len, pkt->pkt_nb, 1373 bytes_written); 1374 bytes_written += tx_desc->len; 1375 1376 print_verbose("Tx addr: %llx len: %u options: %u pkt_nb: %u\n", 1377 tx_desc->addr, tx_desc->len, tx_desc->options, pkt->pkt_nb); 1378 1379 if (nb_frags_left) { 1380 i++; 1381 if (pkt_stream->verbatim) 1382 pkt = pkt_stream_get_next_tx_pkt(pkt_stream); 1383 } 1384 } 1385 1386 if (pkt && pkt->valid) { 1387 valid_pkts++; 1388 valid_frags += nb_frags; 1389 } 1390 } 1391 1392 pthread_mutex_lock(&pacing_mutex); 1393 pkts_in_flight += valid_pkts; 1394 pthread_mutex_unlock(&pacing_mutex); 1395 1396 xsk_ring_prod__submit(&xsk->tx, i); 1397 xsk->outstanding_tx += valid_frags; 1398 1399 if (use_poll) { 1400 ret = poll(&fds, 1, POLL_TMOUT); 1401 if (ret <= 0) { 1402 if (ret == 0 && timeout) 1403 return TEST_PASS; 1404 1405 ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret); 1406 return TEST_FAILURE; 1407 } 1408 } 1409 1410 if (!timeout) { 1411 if (complete_pkts(xsk, i)) 1412 return TEST_FAILURE; 1413 1414 usleep(10); 1415 return TEST_PASS; 1416 } 1417 1418 return TEST_CONTINUE; 1419 } 1420 1421 static int wait_for_tx_completion(struct xsk_socket_info *xsk) 1422 { 1423 struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0}; 1424 int ret; 1425 1426 ret = gettimeofday(&tv_now, NULL); 1427 if (ret) 1428 exit_with_error(errno); 1429 timeradd(&tv_now, &tv_timeout, &tv_end); 1430 1431 while 
(xsk->outstanding_tx) { 1432 ret = gettimeofday(&tv_now, NULL); 1433 if (ret) 1434 exit_with_error(errno); 1435 if (timercmp(&tv_now, &tv_end, >)) { 1436 ksft_print_msg("ERROR: [%s] Transmission loop timed out\n", __func__); 1437 return TEST_FAILURE; 1438 } 1439 1440 complete_pkts(xsk, xsk->batch_size); 1441 } 1442 1443 return TEST_PASS; 1444 } 1445 1446 bool all_packets_sent(struct test_spec *test, unsigned long *bitmap) 1447 { 1448 return bitmap_full(bitmap, test->nb_sockets); 1449 } 1450 1451 static int send_pkts(struct test_spec *test, struct ifobject *ifobject) 1452 { 1453 bool timeout = !is_umem_valid(test->ifobj_rx); 1454 DECLARE_BITMAP(bitmap, test->nb_sockets); 1455 u32 i, ret; 1456 1457 while (!(all_packets_sent(test, bitmap))) { 1458 for (i = 0; i < test->nb_sockets; i++) { 1459 struct pkt_stream *pkt_stream; 1460 1461 pkt_stream = ifobject->xsk_arr[i].pkt_stream; 1462 if (!pkt_stream || pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) { 1463 __set_bit(i, bitmap); 1464 continue; 1465 } 1466 ret = __send_pkts(ifobject, &ifobject->xsk_arr[i], timeout); 1467 if (ret == TEST_CONTINUE && !test->fail) 1468 continue; 1469 1470 if ((ret || test->fail) && !timeout) 1471 return TEST_FAILURE; 1472 1473 if (ret == TEST_PASS && timeout) 1474 return ret; 1475 1476 ret = wait_for_tx_completion(&ifobject->xsk_arr[i]); 1477 if (ret) 1478 return TEST_FAILURE; 1479 } 1480 } 1481 1482 return TEST_PASS; 1483 } 1484 1485 static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats) 1486 { 1487 int fd = xsk_socket__fd(xsk), err; 1488 socklen_t optlen, expected_len; 1489 1490 optlen = sizeof(*stats); 1491 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen); 1492 if (err) { 1493 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1494 __func__, -err, strerror(-err)); 1495 return TEST_FAILURE; 1496 } 1497 1498 expected_len = sizeof(struct xdp_statistics); 1499 if (optlen != expected_len) { 1500 ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n", 1501 __func__, expected_len, optlen); 1502 return TEST_FAILURE; 1503 } 1504 1505 return TEST_PASS; 1506 } 1507 1508 static int validate_rx_dropped(struct ifobject *ifobject) 1509 { 1510 struct xsk_socket *xsk = ifobject->xsk->xsk; 1511 struct xdp_statistics stats; 1512 int err; 1513 1514 err = kick_rx(ifobject->xsk); 1515 if (err) 1516 return TEST_FAILURE; 1517 1518 err = get_xsk_stats(xsk, &stats); 1519 if (err) 1520 return TEST_FAILURE; 1521 1522 /* The receiver calls getsockopt after receiving the last (valid) 1523 * packet which is not the final packet sent in this test (valid and 1524 * invalid packets are sent in alternating fashion with the final 1525 * packet being invalid). Since the last packet may or may not have 1526 * been dropped already, both outcomes must be allowed. 
1527 */ 1528 if (stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 || 1529 stats.rx_dropped == ifobject->xsk->pkt_stream->nb_pkts / 2 - 1) 1530 return TEST_PASS; 1531 1532 return TEST_FAILURE; 1533 } 1534 1535 static int validate_rx_full(struct ifobject *ifobject) 1536 { 1537 struct xsk_socket *xsk = ifobject->xsk->xsk; 1538 struct xdp_statistics stats; 1539 int err; 1540 1541 usleep(1000); 1542 err = kick_rx(ifobject->xsk); 1543 if (err) 1544 return TEST_FAILURE; 1545 1546 err = get_xsk_stats(xsk, &stats); 1547 if (err) 1548 return TEST_FAILURE; 1549 1550 if (stats.rx_ring_full) 1551 return TEST_PASS; 1552 1553 return TEST_FAILURE; 1554 } 1555 1556 static int validate_fill_empty(struct ifobject *ifobject) 1557 { 1558 struct xsk_socket *xsk = ifobject->xsk->xsk; 1559 struct xdp_statistics stats; 1560 int err; 1561 1562 usleep(1000); 1563 err = kick_rx(ifobject->xsk); 1564 if (err) 1565 return TEST_FAILURE; 1566 1567 err = get_xsk_stats(xsk, &stats); 1568 if (err) 1569 return TEST_FAILURE; 1570 1571 if (stats.rx_fill_ring_empty_descs) 1572 return TEST_PASS; 1573 1574 return TEST_FAILURE; 1575 } 1576 1577 static int validate_tx_invalid_descs(struct ifobject *ifobject) 1578 { 1579 struct xsk_socket *xsk = ifobject->xsk->xsk; 1580 int fd = xsk_socket__fd(xsk); 1581 struct xdp_statistics stats; 1582 socklen_t optlen; 1583 int err; 1584 1585 optlen = sizeof(stats); 1586 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 1587 if (err) { 1588 ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n", 1589 __func__, -err, strerror(-err)); 1590 return TEST_FAILURE; 1591 } 1592 1593 if (stats.tx_invalid_descs != ifobject->xsk->pkt_stream->nb_pkts / 2) { 1594 ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%llu] expected [%u]\n", 1595 __func__, 1596 (unsigned long long)stats.tx_invalid_descs, 1597 ifobject->xsk->pkt_stream->nb_pkts); 1598 return TEST_FAILURE; 1599 } 1600 1601 return TEST_PASS; 1602 } 1603 1604 static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, 1605 struct xsk_umem_info *umem, bool tx) 1606 { 1607 int i, ret; 1608 1609 for (i = 0; i < test->nb_sockets; i++) { 1610 bool shared = (ifobject->shared_umem && tx) ? 
true : !!i; 1611 u32 ctr = 0; 1612 1613 while (ctr++ < SOCK_RECONF_CTR) { 1614 ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem, 1615 ifobject, shared); 1616 if (!ret) 1617 break; 1618 1619 /* Retry if it fails as xsk_socket__create() is asynchronous */ 1620 if (ctr >= SOCK_RECONF_CTR) 1621 exit_with_error(-ret); 1622 usleep(USLEEP_MAX); 1623 } 1624 if (ifobject->busy_poll) 1625 enable_busy_poll(&ifobject->xsk_arr[i]); 1626 } 1627 } 1628 1629 static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) 1630 { 1631 xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); 1632 ifobject->xsk = &ifobject->xsk_arr[0]; 1633 ifobject->xskmap = test->ifobj_rx->xskmap; 1634 memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); 1635 ifobject->umem->base_addr = 0; 1636 } 1637 1638 static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, 1639 bool fill_up) 1640 { 1641 u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM; 1642 u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts; 1643 int ret; 1644 1645 if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) 1646 buffers_to_fill = umem->num_frames; 1647 else 1648 buffers_to_fill = umem->fill_size; 1649 1650 ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); 1651 if (ret != buffers_to_fill) 1652 exit_with_error(ENOSPC); 1653 1654 while (filled < buffers_to_fill) { 1655 struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts); 1656 u64 addr; 1657 u32 i; 1658 1659 for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt_stream, pkt); i++) { 1660 if (!pkt) { 1661 if (!fill_up) 1662 break; 1663 addr = filled * umem->frame_size + umem->base_addr; 1664 } else if (pkt->offset >= 0) { 1665 addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem); 1666 } else { 1667 addr = pkt->offset + umem_alloc_buffer(umem); 1668 } 1669 1670 *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; 1671 if (++filled >= buffers_to_fill) 1672 break; 1673 } 1674 } 1675 xsk_ring_prod__submit(&umem->fq, filled); 1676 xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled); 1677 1678 pkt_stream_reset(pkt_stream); 1679 umem_reset_alloc(umem); 1680 } 1681 1682 static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) 1683 { 1684 u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; 1685 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 1686 LIBBPF_OPTS(bpf_xdp_query_opts, opts); 1687 void *bufs; 1688 int ret; 1689 u32 i; 1690 1691 if (ifobject->umem->unaligned_mode) 1692 mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB; 1693 1694 if (ifobject->shared_umem) 1695 umem_sz *= 2; 1696 1697 bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 1698 if (bufs == MAP_FAILED) 1699 exit_with_error(errno); 1700 1701 ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz); 1702 if (ret) 1703 exit_with_error(-ret); 1704 1705 xsk_configure_socket(test, ifobject, ifobject->umem, false); 1706 1707 ifobject->xsk = &ifobject->xsk_arr[0]; 1708 1709 if (!ifobject->rx_on) 1710 return; 1711 1712 xsk_populate_fill_ring(ifobject->umem, ifobject->xsk->pkt_stream, ifobject->use_fill_ring); 1713 1714 for (i = 0; i < test->nb_sockets; i++) { 1715 ifobject->xsk = &ifobject->xsk_arr[i]; 1716 ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, i); 1717 if (ret) 1718 exit_with_error(errno); 1719 } 1720 } 1721 1722 static void *worker_testapp_validate_tx(void *arg) 1723 { 1724 struct test_spec *test = (struct test_spec 
*)arg; 1725 struct ifobject *ifobject = test->ifobj_tx; 1726 int err; 1727 1728 if (test->current_step == 1) { 1729 if (!ifobject->shared_umem) 1730 thread_common_ops(test, ifobject); 1731 else 1732 thread_common_ops_tx(test, ifobject); 1733 } 1734 1735 err = send_pkts(test, ifobject); 1736 1737 if (!err && ifobject->validation_func) 1738 err = ifobject->validation_func(ifobject); 1739 if (err) 1740 report_failure(test); 1741 1742 pthread_exit(NULL); 1743 } 1744 1745 static void *worker_testapp_validate_rx(void *arg) 1746 { 1747 struct test_spec *test = (struct test_spec *)arg; 1748 struct ifobject *ifobject = test->ifobj_rx; 1749 int err; 1750 1751 if (test->current_step == 1) { 1752 thread_common_ops(test, ifobject); 1753 } else { 1754 xsk_clear_xskmap(ifobject->xskmap); 1755 err = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk, 0); 1756 if (err) { 1757 ksft_print_msg("Error: Failed to update xskmap, error %s\n", 1758 strerror(-err)); 1759 exit_with_error(-err); 1760 } 1761 } 1762 1763 pthread_barrier_wait(&barr); 1764 1765 err = receive_pkts(test); 1766 1767 if (!err && ifobject->validation_func) 1768 err = ifobject->validation_func(ifobject); 1769 if (err) 1770 report_failure(test); 1771 1772 pthread_exit(NULL); 1773 } 1774 1775 static u64 ceil_u64(u64 a, u64 b) 1776 { 1777 return (a + b - 1) / b; 1778 } 1779 1780 static void testapp_clean_xsk_umem(struct ifobject *ifobj) 1781 { 1782 u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size; 1783 1784 if (ifobj->shared_umem) 1785 umem_sz *= 2; 1786 1787 umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; 1788 xsk_umem__delete(ifobj->umem->umem); 1789 munmap(ifobj->umem->buffer, umem_sz); 1790 } 1791 1792 static void handler(int signum) 1793 { 1794 pthread_exit(NULL); 1795 } 1796 1797 static bool xdp_prog_changed_rx(struct test_spec *test) 1798 { 1799 struct ifobject *ifobj = test->ifobj_rx; 1800 1801 return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode; 1802 } 1803 1804 static bool xdp_prog_changed_tx(struct test_spec *test) 1805 { 1806 struct ifobject *ifobj = test->ifobj_tx; 1807 1808 return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode; 1809 } 1810 1811 static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog, 1812 struct bpf_map *xskmap, enum test_mode mode) 1813 { 1814 int err; 1815 1816 xsk_detach_xdp_program(ifobj->ifindex, mode_to_xdp_flags(ifobj->mode)); 1817 err = xsk_attach_xdp_program(xdp_prog, ifobj->ifindex, mode_to_xdp_flags(mode)); 1818 if (err) { 1819 ksft_print_msg("Error attaching XDP program\n"); 1820 exit_with_error(-err); 1821 } 1822 1823 if (ifobj->mode != mode && (mode == TEST_MODE_DRV || mode == TEST_MODE_ZC)) 1824 if (!xsk_is_in_mode(ifobj->ifindex, XDP_FLAGS_DRV_MODE)) { 1825 ksft_print_msg("ERROR: XDP prog not in DRV mode\n"); 1826 exit_with_error(EINVAL); 1827 } 1828 1829 ifobj->xdp_prog = xdp_prog; 1830 ifobj->xskmap = xskmap; 1831 ifobj->mode = mode; 1832 } 1833 1834 static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx, 1835 struct ifobject *ifobj_tx) 1836 { 1837 if (xdp_prog_changed_rx(test)) 1838 xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode); 1839 1840 if (!ifobj_tx || ifobj_tx->shared_umem) 1841 return; 1842 1843 if (xdp_prog_changed_tx(test)) 1844 xsk_reattach_xdp(ifobj_tx, test->xdp_prog_tx, test->xskmap_tx, test->mode); 1845 } 1846 1847 static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *ifobj1, 1848 struct ifobject *ifobj2) 1849 
{ 1850 pthread_t t0, t1; 1851 int err; 1852 1853 if (test->mtu > MAX_ETH_PKT_SIZE) { 1854 if (test->mode == TEST_MODE_ZC && (!ifobj1->multi_buff_zc_supp || 1855 (ifobj2 && !ifobj2->multi_buff_zc_supp))) { 1856 ksft_test_result_skip("Multi buffer for zero-copy not supported.\n"); 1857 return TEST_SKIP; 1858 } 1859 if (test->mode != TEST_MODE_ZC && (!ifobj1->multi_buff_supp || 1860 (ifobj2 && !ifobj2->multi_buff_supp))) { 1861 ksft_test_result_skip("Multi buffer not supported.\n"); 1862 return TEST_SKIP; 1863 } 1864 } 1865 err = test_spec_set_mtu(test, test->mtu); 1866 if (err) { 1867 ksft_print_msg("Error, could not set mtu.\n"); 1868 exit_with_error(err); 1869 } 1870 1871 if (ifobj2) { 1872 if (pthread_barrier_init(&barr, NULL, 2)) 1873 exit_with_error(errno); 1874 pkt_stream_reset(ifobj2->xsk->pkt_stream); 1875 } 1876 1877 test->current_step++; 1878 pkt_stream_reset(ifobj1->xsk->pkt_stream); 1879 pkts_in_flight = 0; 1880 1881 signal(SIGUSR1, handler); 1882 /*Spawn RX thread */ 1883 pthread_create(&t0, NULL, ifobj1->func_ptr, test); 1884 1885 if (ifobj2) { 1886 pthread_barrier_wait(&barr); 1887 if (pthread_barrier_destroy(&barr)) 1888 exit_with_error(errno); 1889 1890 /*Spawn TX thread */ 1891 pthread_create(&t1, NULL, ifobj2->func_ptr, test); 1892 1893 pthread_join(t1, NULL); 1894 } 1895 1896 if (!ifobj2) 1897 pthread_kill(t0, SIGUSR1); 1898 else 1899 pthread_join(t0, NULL); 1900 1901 if (test->total_steps == test->current_step || test->fail) { 1902 u32 i; 1903 1904 if (ifobj2) 1905 for (i = 0; i < test->nb_sockets; i++) 1906 xsk_socket__delete(ifobj2->xsk_arr[i].xsk); 1907 1908 for (i = 0; i < test->nb_sockets; i++) 1909 xsk_socket__delete(ifobj1->xsk_arr[i].xsk); 1910 1911 testapp_clean_xsk_umem(ifobj1); 1912 if (ifobj2 && !ifobj2->shared_umem) 1913 testapp_clean_xsk_umem(ifobj2); 1914 } 1915 1916 return !!test->fail; 1917 } 1918 1919 static int testapp_validate_traffic(struct test_spec *test) 1920 { 1921 struct ifobject *ifobj_rx = test->ifobj_rx; 1922 struct ifobject *ifobj_tx = test->ifobj_tx; 1923 1924 if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) || 1925 (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) { 1926 ksft_test_result_skip("No huge pages present.\n"); 1927 return TEST_SKIP; 1928 } 1929 1930 if (test->set_ring) { 1931 if (ifobj_tx->hw_ring_size_supp) { 1932 if (set_ring_size(ifobj_tx)) { 1933 ksft_test_result_skip("Failed to change HW ring size.\n"); 1934 return TEST_FAILURE; 1935 } 1936 } else { 1937 ksft_test_result_skip("Changing HW ring size not supported.\n"); 1938 return TEST_SKIP; 1939 } 1940 } 1941 1942 xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx); 1943 return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx); 1944 } 1945 1946 static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj) 1947 { 1948 return __testapp_validate_traffic(test, ifobj, NULL); 1949 } 1950 1951 static int testapp_teardown(struct test_spec *test) 1952 { 1953 int i; 1954 1955 for (i = 0; i < MAX_TEARDOWN_ITER; i++) { 1956 if (testapp_validate_traffic(test)) 1957 return TEST_FAILURE; 1958 test_spec_reset(test); 1959 } 1960 1961 return TEST_PASS; 1962 } 1963 1964 static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) 1965 { 1966 thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr; 1967 struct ifobject *tmp_ifobj = (*ifobj1); 1968 1969 (*ifobj1)->func_ptr = (*ifobj2)->func_ptr; 1970 (*ifobj2)->func_ptr = tmp_func_ptr; 1971 1972 *ifobj1 = *ifobj2; 1973 *ifobj2 = tmp_ifobj; 1974 } 1975 1976 static int 
static int testapp_bidirectional(struct test_spec *test)
{
	int res;

	test->ifobj_tx->rx_on = true;
	test->ifobj_rx->tx_on = true;
	test->total_steps = 2;
	if (testapp_validate_traffic(test))
		return TEST_FAILURE;

	print_verbose("Switching Tx/Rx direction\n");
	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx);

	swap_directions(&test->ifobj_rx, &test->ifobj_tx);
	return res;
}

static int swap_xsk_resources(struct test_spec *test)
{
	int ret;

	test->ifobj_tx->xsk_arr[0].pkt_stream = NULL;
	test->ifobj_rx->xsk_arr[0].pkt_stream = NULL;
	test->ifobj_tx->xsk_arr[1].pkt_stream = test->tx_pkt_stream_default;
	test->ifobj_rx->xsk_arr[1].pkt_stream = test->rx_pkt_stream_default;
	test->ifobj_tx->xsk = &test->ifobj_tx->xsk_arr[1];
	test->ifobj_rx->xsk = &test->ifobj_rx->xsk_arr[1];

	ret = xsk_update_xskmap(test->ifobj_rx->xskmap, test->ifobj_rx->xsk->xsk, 0);
	if (ret)
		return TEST_FAILURE;

	return TEST_PASS;
}

static int testapp_xdp_prog_cleanup(struct test_spec *test)
{
	test->total_steps = 2;
	test->nb_sockets = 2;
	if (testapp_validate_traffic(test))
		return TEST_FAILURE;

	if (swap_xsk_resources(test))
		return TEST_FAILURE;
	return testapp_validate_traffic(test);
}

static int testapp_headroom(struct test_spec *test)
{
	test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE;
	return testapp_validate_traffic(test);
}

static int testapp_stats_rx_dropped(struct test_spec *test)
{
	if (test->mode == TEST_MODE_ZC) {
		ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n");
		return TEST_SKIP;
	}

	pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
	pkt_stream_receive_half(test);
	test->ifobj_rx->validation_func = validate_rx_dropped;
	return testapp_validate_traffic(test);
}

static int testapp_stats_tx_invalid_descs(struct test_spec *test)
{
	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
	return testapp_validate_traffic(test);
}

static int testapp_stats_rx_full(struct test_spec *test)
{
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
	test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);

	test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
	test->ifobj_rx->release_rx = false;
	test->ifobj_rx->validation_func = validate_rx_full;
	return testapp_validate_traffic(test);
}

static int testapp_stats_fill_empty(struct test_spec *test)
{
	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
	test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);

	test->ifobj_rx->use_fill_ring = false;
	test->ifobj_rx->validation_func = validate_fill_empty;
	return testapp_validate_traffic(test);
}
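
/* SEND_RECEIVE_UNALIGNED: run the default traffic in unaligned mode, with
 * half of the packets offset by -MIN_PKT_SIZE / 2 so that they straddle a
 * buffer boundary.
 */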
static int testapp_send_receive_unaligned(struct test_spec *test)
{
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	/* Let half of the packets straddle a 4K buffer boundary */
	pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2);

	return testapp_validate_traffic(test);
}

static int testapp_send_receive_unaligned_mb(struct test_spec *test)
{
	test->mtu = MAX_ETH_JUMBO_SIZE;
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
	return testapp_validate_traffic(test);
}

static int testapp_single_pkt(struct test_spec *test)
{
	struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	return testapp_validate_traffic(test);
}

static int testapp_send_receive_mb(struct test_spec *test)
{
	test->mtu = MAX_ETH_JUMBO_SIZE;
	pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);

	return testapp_validate_traffic(test);
}

static int testapp_invalid_desc_mb(struct test_spec *test)
{
	struct xsk_umem_info *umem = test->ifobj_tx->umem;
	u64 umem_size = umem->num_frames * umem->frame_size;
	struct pkt pkts[] = {
		/* Valid packet for synch to start with */
		{0, MIN_PKT_SIZE, 0, true, 0},
		/* Zero frame len is not legal */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, 0, 0, false, 0},
		/* Invalid address in the second frame */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{umem_size, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		/* Invalid len in the middle */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		/* Invalid options in the middle */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XSK_DESC__INVALID_OPTION},
		/* Transmit 2 frags, receive 3 */
		{0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, XDP_PKT_CONTD},
		{0, XSK_UMEM__MAX_FRAME_SIZE, 0, true, 0},
		/* Middle frame crosses chunk boundary with small length */
		{0, XSK_UMEM__LARGE_FRAME_SIZE, 0, false, XDP_PKT_CONTD},
		{-MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false, 0},
		/* Valid packet for synch so that something is received */
		{0, MIN_PKT_SIZE, 0, true, 0}};

	if (umem->unaligned_mode) {
		/* Crossing a chunk boundary allowed */
		pkts[12].valid = true;
		pkts[13].valid = true;
	}

	test->mtu = MAX_ETH_JUMBO_SIZE;
	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	return testapp_validate_traffic(test);
}

static int testapp_invalid_desc(struct test_spec *test)
{
	struct xsk_umem_info *umem = test->ifobj_tx->umem;
	u64 umem_size = umem->num_frames * umem->frame_size;
	struct pkt pkts[] = {
		/* Zero packet address allowed */
		{0, MIN_PKT_SIZE, 0, true},
		/* Allowed packet */
		{0, MIN_PKT_SIZE, 0, true},
		/* Straddling the start of umem */
		{-2, MIN_PKT_SIZE, 0, false},
		/* Packet too large */
		{0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
		/* Up to end of umem allowed */
		{umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true},
		/* After umem ends */
		{umem_size, MIN_PKT_SIZE, 0, false},
		/* Straddle the end of umem */
		{umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
		/* Straddle a 4K boundary */
		{0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false},
		/* Straddle a 2K boundary */
		{0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true},
		/* Valid packet for synch so that something is received */
		{0, MIN_PKT_SIZE, 0, true}};

	if (umem->unaligned_mode) {
		/* Crossing a page boundary allowed */
		pkts[7].valid = true;
	}
	if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) {
		/* Crossing a 2K frame size boundary not allowed */
		pkts[8].valid = false;
	}

	if (test->ifobj_tx->shared_umem) {
		pkts[4].offset += umem_size;
		pkts[5].offset += umem_size;
		pkts[6].offset += umem_size;
	}

	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
	return testapp_validate_traffic(test);
}

static int testapp_xdp_drop(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;

	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);

	pkt_stream_receive_half(test);
	return testapp_validate_traffic(test);
}

static int testapp_xdp_metadata_copy(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;
	struct bpf_map *data_map;
	int count = 0;
	int key = 0;

	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata,
			       skel_tx->progs.xsk_xdp_populate_metadata,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);
	test->ifobj_rx->use_metadata = true;

	data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss");
	if (!data_map || !bpf_map__is_internal(data_map)) {
		ksft_print_msg("Error: could not find bss section of XDP program\n");
		return TEST_FAILURE;
	}

	if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) {
		ksft_print_msg("Error: could not update count element\n");
		return TEST_FAILURE;
	}

	return testapp_validate_traffic(test);
}

static int testapp_xdp_shared_umem(struct test_spec *test)
{
	struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs;
	struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs;

	test->total_steps = 1;
	test->nb_sockets = 2;

	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_shared_umem,
			       skel_tx->progs.xsk_xdp_shared_umem,
			       skel_rx->maps.xsk, skel_tx->maps.xsk);

	pkt_stream_even_odd_sequence(test);

	return testapp_validate_traffic(test);
}

static int testapp_poll_txq_tmout(struct test_spec *test)
{
	test->ifobj_tx->use_poll = true;
	/* Create invalid frames by setting both the umem frame_size and the pkt length to 2048 */
	test->ifobj_tx->umem->frame_size = 2048;
	pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048);
	return testapp_validate_traffic_single_thread(test, test->ifobj_tx);
}

static int testapp_poll_rxq_tmout(struct test_spec *test)
{
	test->ifobj_rx->use_poll = true;
	return testapp_validate_traffic_single_thread(test, test->ifobj_rx);
}

static int testapp_too_many_frags(struct test_spec *test)
{
	struct pkt *pkts;
	u32 max_frags, i;
	int ret;

	if (test->mode == TEST_MODE_ZC) {
		max_frags = test->ifobj_tx->xdp_zc_max_segs;
	} else {
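		/* Copy mode: the frag limit follows the kernel's MAX_SKB_FRAGS, read
		 * from the running system; fall back to the historical default of 17
		 * if it cannot be retrieved.
		 */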
		max_frags = get_max_skb_frags();
		if (!max_frags) {
			ksft_print_msg("Couldn't retrieve MAX_SKB_FRAGS from system, using default (17) value\n");
			max_frags = 17;
		}
		max_frags += 1;
	}

	pkts = calloc(2 * max_frags + 2, sizeof(struct pkt));
	if (!pkts)
		return TEST_FAILURE;

	test->mtu = MAX_ETH_JUMBO_SIZE;

	/* Valid packet for synch */
	pkts[0].len = MIN_PKT_SIZE;
	pkts[0].valid = true;

	/* One valid packet with the max amount of frags */
	for (i = 1; i < max_frags + 1; i++) {
		pkts[i].len = MIN_PKT_SIZE;
		pkts[i].options = XDP_PKT_CONTD;
		pkts[i].valid = true;
	}
	pkts[max_frags].options = 0;

	/* An invalid packet with the max amount of frags but signals packet
	 * continues on the last frag
	 */
	for (i = max_frags + 1; i < 2 * max_frags + 1; i++) {
		pkts[i].len = MIN_PKT_SIZE;
		pkts[i].options = XDP_PKT_CONTD;
		pkts[i].valid = false;
	}

	/* Valid packet for synch */
	pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
	pkts[2 * max_frags + 1].valid = true;

	pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2);
	ret = testapp_validate_traffic(test);

	free(pkts);
	return ret;
}

static int xsk_load_xdp_programs(struct ifobject *ifobj)
{
	ifobj->xdp_progs = xsk_xdp_progs__open_and_load();
	if (libbpf_get_error(ifobj->xdp_progs))
		return libbpf_get_error(ifobj->xdp_progs);

	return 0;
}

static void xsk_unload_xdp_programs(struct ifobject *ifobj)
{
	xsk_xdp_progs__destroy(ifobj->xdp_progs);
}

/* Simple test */
static bool hugepages_present(void)
{
	size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *bufs;

	/* MAP_HUGE_2MB is a mapping flag, not an offset; for an anonymous
	 * mapping the offset must be 0.
	 */
	bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0);
	if (bufs == MAP_FAILED)
		return false;

	mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
	munmap(bufs, mmap_sz);
	return true;
}

static void init_iface(struct ifobject *ifobj, thread_func_t func_ptr)
{
	LIBBPF_OPTS(bpf_xdp_query_opts, query_opts);
	int err;

	ifobj->func_ptr = func_ptr;

	err = xsk_load_xdp_programs(ifobj);
	if (err) {
		ksft_print_msg("Error loading XDP program\n");
		exit_with_error(err);
	}

	if (hugepages_present())
		ifobj->unaligned_supp = true;

	err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts);
	if (err) {
		ksft_print_msg("Error querying XDP capabilities\n");
		exit_with_error(-err);
	}
	if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG)
		ifobj->multi_buff_supp = true;
	if (query_opts.feature_flags & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (query_opts.xdp_zc_max_segs > 1) {
			ifobj->multi_buff_zc_supp = true;
			ifobj->xdp_zc_max_segs = query_opts.xdp_zc_max_segs;
		} else {
			ifobj->xdp_zc_max_segs = 0;
		}
	}
}

static int testapp_send_receive(struct test_spec *test)
{
	return testapp_validate_traffic(test);
}

static int testapp_send_receive_2k_frame(struct test_spec *test)
{
	test->ifobj_tx->umem->frame_size = 2048;
	test->ifobj_rx->umem->frame_size = 2048;
	pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
	return testapp_validate_traffic(test);
}
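
/* POLL_RX/POLL_TX: same default traffic, but the Rx or Tx worker waits for
 * the socket with the poll() syscall instead of processing the rings
 * unconditionally.
 */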
static int testapp_poll_rx(struct test_spec *test)
{
	test->ifobj_rx->use_poll = true;
	return testapp_validate_traffic(test);
}

static int testapp_poll_tx(struct test_spec *test)
{
	test->ifobj_tx->use_poll = true;
	return testapp_validate_traffic(test);
}

static int testapp_aligned_inv_desc(struct test_spec *test)
{
	return testapp_invalid_desc(test);
}

static int testapp_aligned_inv_desc_2k_frame(struct test_spec *test)
{
	test->ifobj_tx->umem->frame_size = 2048;
	test->ifobj_rx->umem->frame_size = 2048;
	return testapp_invalid_desc(test);
}

static int testapp_unaligned_inv_desc(struct test_spec *test)
{
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	return testapp_invalid_desc(test);
}

static int testapp_unaligned_inv_desc_4001_frame(struct test_spec *test)
{
	u64 page_size, umem_size;

	/* Odd frame size so the UMEM doesn't end near a page boundary. */
	test->ifobj_tx->umem->frame_size = 4001;
	test->ifobj_rx->umem->frame_size = 4001;
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	/* This test exists to test descriptors that straddle the end of
	 * the UMEM but not a page.
	 */
	page_size = sysconf(_SC_PAGESIZE);
	umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
	assert(umem_size % page_size > MIN_PKT_SIZE);
	assert(umem_size % page_size < page_size - MIN_PKT_SIZE);

	return testapp_invalid_desc(test);
}

static int testapp_aligned_inv_desc_mb(struct test_spec *test)
{
	return testapp_invalid_desc_mb(test);
}

static int testapp_unaligned_inv_desc_mb(struct test_spec *test)
{
	test->ifobj_tx->umem->unaligned_mode = true;
	test->ifobj_rx->umem->unaligned_mode = true;
	return testapp_invalid_desc_mb(test);
}

static int testapp_xdp_metadata(struct test_spec *test)
{
	return testapp_xdp_metadata_copy(test);
}

static int testapp_xdp_metadata_mb(struct test_spec *test)
{
	test->mtu = MAX_ETH_JUMBO_SIZE;
	return testapp_xdp_metadata_copy(test);
}

static int testapp_hw_sw_min_ring_size(struct test_spec *test)
{
	int ret;

	test->set_ring = true;
	test->total_steps = 2;
	test->ifobj_tx->ring.tx_pending = DEFAULT_BATCH_SIZE;
	test->ifobj_tx->ring.rx_pending = DEFAULT_BATCH_SIZE * 2;
	test->ifobj_tx->xsk->batch_size = 1;
	test->ifobj_rx->xsk->batch_size = 1;
	ret = testapp_validate_traffic(test);
	if (ret)
		return ret;

	/* Set batch size to hw_ring_size - 1 */
	test->ifobj_tx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
	test->ifobj_rx->xsk->batch_size = DEFAULT_BATCH_SIZE - 1;
	return testapp_validate_traffic(test);
}

static int testapp_hw_sw_max_ring_size(struct test_spec *test)
{
	u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4;
	int ret;

	test->set_ring = true;
	test->total_steps = 2;
	test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending;
	test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending;
	test->ifobj_rx->umem->num_frames = max_descs;
	test->ifobj_rx->umem->fill_size = max_descs;
	test->ifobj_rx->umem->comp_size = max_descs;
	test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;

	ret = testapp_validate_traffic(test);
	if (ret)
		return ret;

	/* Set batch_size to 8152 for testing, as the ice HW ignores the 3 lowest bits when
	 * updating the Rx HW tail register.
	 */
	test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
	test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
	pkt_stream_replace(test, max_descs, MIN_PKT_SIZE);
	return testapp_validate_traffic(test);
}

static void run_pkt_test(struct test_spec *test)
{
	int ret;

	ret = test->test_func(test);

	if (ret == TEST_PASS)
		ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
				      test->name);
	pkt_stream_restore_default(test);
}

static struct ifobject *ifobject_create(void)
{
	struct ifobject *ifobj;

	ifobj = calloc(1, sizeof(struct ifobject));
	if (!ifobj)
		return NULL;

	ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr));
	if (!ifobj->xsk_arr)
		goto out_xsk_arr;

	ifobj->umem = calloc(1, sizeof(*ifobj->umem));
	if (!ifobj->umem)
		goto out_umem;

	return ifobj;

out_umem:
	free(ifobj->xsk_arr);
out_xsk_arr:
	free(ifobj);
	return NULL;
}

static void ifobject_delete(struct ifobject *ifobj)
{
	free(ifobj->umem);
	free(ifobj->xsk_arr);
	free(ifobj);
}

static bool is_xdp_supported(int ifindex)
{
	int flags = XDP_FLAGS_DRV_MODE;

	LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags);
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
		BPF_EXIT_INSN()
	};
	int prog_fd, insn_cnt = ARRAY_SIZE(insns);
	int err;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
	if (prog_fd < 0)
		return false;

	err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL);
	if (err) {
		close(prog_fd);
		return false;
	}

	bpf_xdp_detach(ifindex, flags, NULL);
	close(prog_fd);

	return true;
}

static const struct test_spec tests[] = {
	{.name = "SEND_RECEIVE", .test_func = testapp_send_receive},
	{.name = "SEND_RECEIVE_2K_FRAME", .test_func = testapp_send_receive_2k_frame},
	{.name = "SEND_RECEIVE_SINGLE_PKT", .test_func = testapp_single_pkt},
	{.name = "POLL_RX", .test_func = testapp_poll_rx},
	{.name = "POLL_TX", .test_func = testapp_poll_tx},
	{.name = "POLL_RXQ_FULL", .test_func = testapp_poll_rxq_tmout},
	{.name = "POLL_TXQ_FULL", .test_func = testapp_poll_txq_tmout},
	{.name = "SEND_RECEIVE_UNALIGNED", .test_func = testapp_send_receive_unaligned},
	{.name = "ALIGNED_INV_DESC", .test_func = testapp_aligned_inv_desc},
	{.name = "ALIGNED_INV_DESC_2K_FRAME_SIZE", .test_func = testapp_aligned_inv_desc_2k_frame},
	{.name = "UNALIGNED_INV_DESC", .test_func = testapp_unaligned_inv_desc},
	{.name = "UNALIGNED_INV_DESC_4001_FRAME_SIZE",
	 .test_func = testapp_unaligned_inv_desc_4001_frame},
	{.name = "UMEM_HEADROOM", .test_func = testapp_headroom},
	{.name = "TEARDOWN", .test_func = testapp_teardown},
	{.name = "BIDIRECTIONAL", .test_func = testapp_bidirectional},
	{.name = "STAT_RX_DROPPED", .test_func = testapp_stats_rx_dropped},
	{.name = "STAT_TX_INVALID", .test_func = testapp_stats_tx_invalid_descs},
"STAT_RX_FULL", .test_func = testapp_stats_rx_full}, 2609 {.name = "STAT_FILL_EMPTY", .test_func = testapp_stats_fill_empty}, 2610 {.name = "XDP_PROG_CLEANUP", .test_func = testapp_xdp_prog_cleanup}, 2611 {.name = "XDP_DROP_HALF", .test_func = testapp_xdp_drop}, 2612 {.name = "XDP_SHARED_UMEM", .test_func = testapp_xdp_shared_umem}, 2613 {.name = "XDP_METADATA_COPY", .test_func = testapp_xdp_metadata}, 2614 {.name = "XDP_METADATA_COPY_MULTI_BUFF", .test_func = testapp_xdp_metadata_mb}, 2615 {.name = "SEND_RECEIVE_9K_PACKETS", .test_func = testapp_send_receive_mb}, 2616 {.name = "SEND_RECEIVE_UNALIGNED_9K_PACKETS", 2617 .test_func = testapp_send_receive_unaligned_mb}, 2618 {.name = "ALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_aligned_inv_desc_mb}, 2619 {.name = "UNALIGNED_INV_DESC_MULTI_BUFF", .test_func = testapp_unaligned_inv_desc_mb}, 2620 {.name = "TOO_MANY_FRAGS", .test_func = testapp_too_many_frags}, 2621 {.name = "HW_SW_MIN_RING_SIZE", .test_func = testapp_hw_sw_min_ring_size}, 2622 {.name = "HW_SW_MAX_RING_SIZE", .test_func = testapp_hw_sw_max_ring_size}, 2623 }; 2624 2625 static void print_tests(void) 2626 { 2627 u32 i; 2628 2629 printf("Tests:\n"); 2630 for (i = 0; i < ARRAY_SIZE(tests); i++) 2631 printf("%u: %s\n", i, tests[i].name); 2632 } 2633 2634 int main(int argc, char **argv) 2635 { 2636 struct pkt_stream *rx_pkt_stream_default; 2637 struct pkt_stream *tx_pkt_stream_default; 2638 struct ifobject *ifobj_tx, *ifobj_rx; 2639 u32 i, j, failed_tests = 0, nb_tests; 2640 int modes = TEST_MODE_SKB + 1; 2641 struct test_spec test; 2642 bool shared_netdev; 2643 int ret; 2644 2645 /* Use libbpf 1.0 API mode */ 2646 libbpf_set_strict_mode(LIBBPF_STRICT_ALL); 2647 2648 ifobj_tx = ifobject_create(); 2649 if (!ifobj_tx) 2650 exit_with_error(ENOMEM); 2651 ifobj_rx = ifobject_create(); 2652 if (!ifobj_rx) 2653 exit_with_error(ENOMEM); 2654 2655 setlocale(LC_ALL, ""); 2656 2657 parse_command_line(ifobj_tx, ifobj_rx, argc, argv); 2658 2659 if (opt_print_tests) { 2660 print_tests(); 2661 ksft_exit_xpass(); 2662 } 2663 if (opt_run_test != RUN_ALL_TESTS && opt_run_test >= ARRAY_SIZE(tests)) { 2664 ksft_print_msg("Error: test %u does not exist.\n", opt_run_test); 2665 ksft_exit_xfail(); 2666 } 2667 2668 shared_netdev = (ifobj_tx->ifindex == ifobj_rx->ifindex); 2669 ifobj_tx->shared_umem = shared_netdev; 2670 ifobj_rx->shared_umem = shared_netdev; 2671 2672 if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) 2673 print_usage(argv); 2674 2675 if (is_xdp_supported(ifobj_tx->ifindex)) { 2676 modes++; 2677 if (ifobj_zc_avail(ifobj_tx)) 2678 modes++; 2679 } 2680 2681 ret = get_hw_ring_size(ifobj_tx->ifname, &ifobj_tx->ring); 2682 if (!ret) { 2683 ifobj_tx->hw_ring_size_supp = true; 2684 ifobj_tx->set_ring.default_tx = ifobj_tx->ring.tx_pending; 2685 ifobj_tx->set_ring.default_rx = ifobj_tx->ring.rx_pending; 2686 } 2687 2688 init_iface(ifobj_rx, worker_testapp_validate_rx); 2689 init_iface(ifobj_tx, worker_testapp_validate_tx); 2690 2691 test_spec_init(&test, ifobj_tx, ifobj_rx, 0, &tests[0]); 2692 tx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE); 2693 rx_pkt_stream_default = pkt_stream_generate(DEFAULT_PKT_CNT, MIN_PKT_SIZE); 2694 if (!tx_pkt_stream_default || !rx_pkt_stream_default) 2695 exit_with_error(ENOMEM); 2696 test.tx_pkt_stream_default = tx_pkt_stream_default; 2697 test.rx_pkt_stream_default = rx_pkt_stream_default; 2698 2699 if (opt_run_test == RUN_ALL_TESTS) 2700 nb_tests = ARRAY_SIZE(tests); 2701 else 2702 nb_tests = 1; 2703 if (opt_mode == 
	if (opt_mode == TEST_MODE_ALL) {
		ksft_set_plan(modes * nb_tests);
	} else {
		if (opt_mode == TEST_MODE_DRV && modes <= TEST_MODE_DRV) {
			ksft_print_msg("Error: XDP_DRV mode not supported.\n");
			ksft_exit_xfail();
		}
		if (opt_mode == TEST_MODE_ZC && modes <= TEST_MODE_ZC) {
			ksft_print_msg("Error: zero-copy mode not supported.\n");
			ksft_exit_xfail();
		}

		ksft_set_plan(nb_tests);
	}

	for (i = 0; i < modes; i++) {
		if (opt_mode != TEST_MODE_ALL && i != opt_mode)
			continue;

		for (j = 0; j < ARRAY_SIZE(tests); j++) {
			if (opt_run_test != RUN_ALL_TESTS && j != opt_run_test)
				continue;

			test_spec_init(&test, ifobj_tx, ifobj_rx, i, &tests[j]);
			run_pkt_test(&test);
			usleep(USLEEP_MAX);

			if (test.fail)
				failed_tests++;
		}
	}

	if (ifobj_tx->hw_ring_size_supp)
		hw_ring_size_reset(ifobj_tx);

	pkt_stream_delete(tx_pkt_stream_default);
	pkt_stream_delete(rx_pkt_stream_default);
	xsk_unload_xdp_programs(ifobj_tx);
	xsk_unload_xdp_programs(ifobj_rx);
	ifobject_delete(ifobj_tx);
	ifobject_delete(ifobj_rx);

	if (failed_tests)
		ksft_exit_fail();
	else
		ksft_exit_pass();
}