1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * tcpdevmem netcat. Works similarly to netcat but does device memory TCP 4 * instead of regular TCP. Uses udmabuf to mock a dmabuf provider. 5 * 6 * Usage: 7 * 8 * On server: 9 * ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201 10 * 11 * On client: 12 * echo -n "hello\nworld" | \ 13 * ncdevmem -s <server IP> [-c <client IP>] -p 5201 -f eth1 14 * 15 * Note this is compatible with regular netcat. i.e. the sender or receiver can 16 * be replaced with regular netcat to test the RX or TX path in isolation. 17 * 18 * Test data validation (devmem TCP on RX only): 19 * 20 * On server: 21 * ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201 -v 7 22 * 23 * On client: 24 * yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \ 25 * head -c 1G | \ 26 * nc <server IP> 5201 -p 5201 27 * 28 * Test data validation (devmem TCP on RX and TX, validation happens on RX): 29 * 30 * On server: 31 * ncdevmem -s <server IP> [-c <client IP>] -l -p 5201 -v 8 -f eth1 32 * 33 * On client: 34 * yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06\\x07) | \ 35 * head -c 1M | \ 36 * ncdevmem -s <server IP> [-c <client IP>] -p 5201 -f eth1 37 */ 38 #define _GNU_SOURCE 39 #define __EXPORTED_HEADERS__ 40 41 #include <linux/uio.h> 42 #include <stdarg.h> 43 #include <stdio.h> 44 #include <stdlib.h> 45 #include <unistd.h> 46 #include <stdbool.h> 47 #include <string.h> 48 #include <errno.h> 49 #define __iovec_defined 50 #include <fcntl.h> 51 #include <malloc.h> 52 #include <error.h> 53 #include <poll.h> 54 55 #include <arpa/inet.h> 56 #include <sys/socket.h> 57 #include <sys/mman.h> 58 #include <sys/ioctl.h> 59 #include <sys/syscall.h> 60 #include <sys/time.h> 61 62 #include <linux/memfd.h> 63 #include <linux/dma-buf.h> 64 #include <linux/errqueue.h> 65 #include <linux/udmabuf.h> 66 #include <linux/types.h> 67 #include <linux/netlink.h> 68 #include <linux/genetlink.h> 69 #include <linux/netdev.h> 70 #include <linux/ethtool_netlink.h> 71 #include 
<time.h>
#include <net/if.h>

#include "netdev-user.h"
#include "ethtool-user.h"
#include <ynl.h>

#define PAGE_SHIFT 12
#define TEST_PREFIX "ncdevmem"
#define NUM_PAGES 16000

#ifndef MSG_SOCK_DEVMEM
#define MSG_SOCK_DEVMEM 0x2000000
#endif

#define MAX_IOV 1024

/* Max bytes per TX iovec; 0 means each input line is sent as one iovec */
static size_t max_chunk;
static char *server_ip;
static char *client_ip;
static char *port;
/* When non-zero, RX payload is validated against a pattern that repeats
 * every do_validation bytes (see validate_buffer()).
 */
static size_t do_validation;
static int start_queue = -1;
static int num_queues = -1;
static char *ifname;
static unsigned int ifindex;
/* dmabuf id returned by the RX bind; incoming cmsgs are checked against it */
static unsigned int dmabuf_id;
static uint32_t tx_dmabuf_id;
/* Timeout (ms) for polling/reaping TX completions on the error queue */
static int waittime_ms = 500;

/* System state loaded by current_config_load() */
#define MAX_FLOWS 8
/* ethtool ntuple rule IDs installed by ethtool_add_flow(); -1 == free slot */
static int ntuple_ids[MAX_FLOWS] = { -1, -1, -1, -1, -1, -1, -1, -1, };

/* One dmabuf-backed buffer plus the fds that keep it alive */
struct memory_buffer {
	int fd;		/* dmabuf fd (from UDMABUF_CREATE for udmabuf) */
	size_t size;

	int devfd;	/* /dev/udmabuf */
	int memfd;	/* sealed memfd backing the dmabuf */
	char *buf_mem;	/* CPU mapping of the dmabuf */
};

/* Pluggable allocator + CPU<->device copy ops; udmabuf mock by default */
struct memory_provider {
	struct memory_buffer *(*alloc)(size_t size);
	void (*free)(struct memory_buffer *ctx);
	void (*memcpy_to_device)(struct memory_buffer *dst, size_t off,
				 void *src, int n);
	void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
				   size_t off, int n);
};

/* Print "ncdevmem: <msg>[: strerror(errno)]" to stderr; printf-style args */
static void pr_err(const char *fmt, ...)
124 { 125 va_list args; 126 127 fprintf(stderr, "%s: ", TEST_PREFIX); 128 129 va_start(args, fmt); 130 vfprintf(stderr, fmt, args); 131 va_end(args); 132 133 if (errno != 0) 134 fprintf(stderr, ": %s", strerror(errno)); 135 fprintf(stderr, "\n"); 136 } 137 138 static struct memory_buffer *udmabuf_alloc(size_t size) 139 { 140 struct udmabuf_create create; 141 struct memory_buffer *ctx; 142 int ret; 143 144 ctx = malloc(sizeof(*ctx)); 145 if (!ctx) 146 return NULL; 147 148 ctx->size = size; 149 150 ctx->devfd = open("/dev/udmabuf", O_RDWR); 151 if (ctx->devfd < 0) { 152 pr_err("[skip,no-udmabuf: Unable to access DMA buffer device file]"); 153 goto err_free_ctx; 154 } 155 156 ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING); 157 if (ctx->memfd < 0) { 158 pr_err("[skip,no-memfd]"); 159 goto err_close_dev; 160 } 161 162 ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK); 163 if (ret < 0) { 164 pr_err("[skip,fcntl-add-seals]"); 165 goto err_close_memfd; 166 } 167 168 ret = ftruncate(ctx->memfd, size); 169 if (ret == -1) { 170 pr_err("[FAIL,memfd-truncate]"); 171 goto err_close_memfd; 172 } 173 174 memset(&create, 0, sizeof(create)); 175 176 create.memfd = ctx->memfd; 177 create.offset = 0; 178 create.size = size; 179 ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create); 180 if (ctx->fd < 0) { 181 pr_err("[FAIL, create udmabuf]"); 182 goto err_close_fd; 183 } 184 185 ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, 186 ctx->fd, 0); 187 if (ctx->buf_mem == MAP_FAILED) { 188 pr_err("[FAIL, map udmabuf]"); 189 goto err_close_fd; 190 } 191 192 return ctx; 193 194 err_close_fd: 195 close(ctx->fd); 196 err_close_memfd: 197 close(ctx->memfd); 198 err_close_dev: 199 close(ctx->devfd); 200 err_free_ctx: 201 free(ctx); 202 return NULL; 203 } 204 205 static void udmabuf_free(struct memory_buffer *ctx) 206 { 207 munmap(ctx->buf_mem, ctx->size); 208 close(ctx->fd); 209 close(ctx->memfd); 210 close(ctx->devfd); 211 free(ctx); 212 } 213 214 static void 
udmabuf_memcpy_to_device(struct memory_buffer *dst, size_t off, 215 void *src, int n) 216 { 217 struct dma_buf_sync sync = {}; 218 219 sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE; 220 ioctl(dst->fd, DMA_BUF_IOCTL_SYNC, &sync); 221 222 memcpy(dst->buf_mem + off, src, n); 223 224 sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE; 225 ioctl(dst->fd, DMA_BUF_IOCTL_SYNC, &sync); 226 } 227 228 static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src, 229 size_t off, int n) 230 { 231 struct dma_buf_sync sync = {}; 232 233 sync.flags = DMA_BUF_SYNC_START; 234 ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync); 235 236 memcpy(dst, src->buf_mem + off, n); 237 238 sync.flags = DMA_BUF_SYNC_END; 239 ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync); 240 } 241 242 static struct memory_provider udmabuf_memory_provider = { 243 .alloc = udmabuf_alloc, 244 .free = udmabuf_free, 245 .memcpy_to_device = udmabuf_memcpy_to_device, 246 .memcpy_from_device = udmabuf_memcpy_from_device, 247 }; 248 249 static struct memory_provider *provider = &udmabuf_memory_provider; 250 251 static void print_nonzero_bytes(void *ptr, size_t size) 252 { 253 unsigned char *p = ptr; 254 unsigned int i; 255 256 for (i = 0; i < size; i++) 257 putchar(p[i]); 258 } 259 260 int validate_buffer(void *line, size_t size) 261 { 262 static unsigned char seed = 1; 263 unsigned char *ptr = line; 264 unsigned char expected; 265 static int errors; 266 size_t i; 267 268 for (i = 0; i < size; i++) { 269 expected = seed ? 
seed : '\n'; 270 if (ptr[i] != expected) { 271 fprintf(stderr, 272 "Failed validation: expected=%u, actual=%u, index=%lu\n", 273 expected, ptr[i], i); 274 errors++; 275 if (errors > 20) { 276 pr_err("validation failed"); 277 return -1; 278 } 279 } 280 seed++; 281 if (seed == do_validation) 282 seed = 0; 283 } 284 285 fprintf(stdout, "Validated buffer\n"); 286 return 0; 287 } 288 289 static int 290 __run_command(char *out, size_t outlen, const char *cmd, va_list args) 291 { 292 char command[256]; 293 FILE *fp; 294 295 vsnprintf(command, sizeof(command), cmd, args); 296 297 fprintf(stderr, "Running: %s\n", command); 298 fp = popen(command, "r"); 299 if (!fp) 300 return -1; 301 if (out) { 302 size_t len; 303 304 if (!fgets(out, outlen, fp)) 305 return -1; 306 307 /* Remove trailing newline if present */ 308 len = strlen(out); 309 if (len && out[len - 1] == '\n') 310 out[len - 1] = '\0'; 311 } 312 return pclose(fp); 313 } 314 315 static int run_command(const char *cmd, ...) 316 { 317 va_list args; 318 int ret; 319 320 va_start(args, cmd); 321 ret = __run_command(NULL, 0, cmd, args); 322 va_end(args); 323 324 return ret; 325 } 326 327 static int ethtool_add_flow(const char *format, ...) 
{
	/* Install an ethtool ntuple steering rule on @ifname (format is the
	 * rule specification, printf-style), parse the new rule ID out of the
	 * tool's output, and remember it for reset_flow_steering().
	 * Returns the rule ID (>= 0) or -1 on failure.
	 */
	char local_output[256], cmd[256];
	const char *id_start;
	int flow_idx, ret;
	char *endptr;
	long flow_id;
	va_list args;

	/* Find a free slot to record the rule ID for later cleanup */
	for (flow_idx = 0; flow_idx < MAX_FLOWS; flow_idx++)
		if (ntuple_ids[flow_idx] == -1)
			break;
	if (flow_idx == MAX_FLOWS) {
		fprintf(stderr, "Error: too many flows\n");
		return -1;
	}

	snprintf(cmd, sizeof(cmd), "ethtool -N %s %s", ifname, format);

	va_start(args, format);
	ret = __run_command(local_output, sizeof(local_output), cmd, args);
	va_end(args);

	if (ret != 0)
		return ret;

	/* Extract the ID from the output */
	id_start = strstr(local_output, "Added rule with ID ");
	if (!id_start)
		return -1;
	id_start += strlen("Added rule with ID ");

	flow_id = strtol(id_start, &endptr, 10);
	if (endptr == id_start || flow_id < 0 || flow_id > INT_MAX)
		return -1;

	fprintf(stderr, "Added flow rule with ID %ld\n", flow_id);
	ntuple_ids[flow_idx] = flow_id;
	return flow_id;
}

/* Total RX-capable queue count (rx + combined) via ethtool netlink,
 * or -1 on failure.
 */
static int rxq_num(int ifindex)
{
	struct ethtool_channels_get_req *req;
	struct ethtool_channels_get_rsp *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int num = -1;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = ethtool_channels_get_req_alloc();
	ethtool_channels_get_req_set_header_dev_index(req, ifindex);
	rsp = ethtool_channels_get(ys, req);
	if (rsp)
		num = rsp->rx_count + rsp->combined_count;
	ethtool_channels_get_req_free(req);
	ethtool_channels_get_rsp_free(rsp);

	ynl_sock_destroy(ys);

	return num;
}

/* Delete every ntuple rule recorded by ethtool_add_flow() */
static void reset_flow_steering(void)
{
	int i;

	for (i = 0; i < MAX_FLOWS; i++) {
		if (ntuple_ids[i] == -1)
			continue;
		run_command("ethtool -N %s delete %d",
			    ifname, ntuple_ids[i]);
		ntuple_ids[i] = -1;
	}
}

static const
char *tcp_data_split_str(int val) 409 { 410 switch (val) { 411 case 0: 412 return "off"; 413 case 1: 414 return "auto"; 415 case 2: 416 return "on"; 417 default: 418 return "?"; 419 } 420 } 421 422 static struct ethtool_rings_get_rsp *get_ring_config(void) 423 { 424 struct ethtool_rings_get_req *get_req; 425 struct ethtool_rings_get_rsp *get_rsp; 426 struct ynl_error yerr; 427 struct ynl_sock *ys; 428 429 ys = ynl_sock_create(&ynl_ethtool_family, &yerr); 430 if (!ys) { 431 fprintf(stderr, "YNL: %s\n", yerr.msg); 432 return NULL; 433 } 434 435 get_req = ethtool_rings_get_req_alloc(); 436 ethtool_rings_get_req_set_header_dev_index(get_req, ifindex); 437 get_rsp = ethtool_rings_get(ys, get_req); 438 ethtool_rings_get_req_free(get_req); 439 440 ynl_sock_destroy(ys); 441 442 return get_rsp; 443 } 444 445 static void restore_ring_config(const struct ethtool_rings_get_rsp *config) 446 { 447 struct ethtool_rings_get_req *get_req; 448 struct ethtool_rings_get_rsp *get_rsp; 449 struct ethtool_rings_set_req *req; 450 struct ynl_error yerr; 451 struct ynl_sock *ys; 452 int ret; 453 454 if (!config) 455 return; 456 457 ys = ynl_sock_create(&ynl_ethtool_family, &yerr); 458 if (!ys) { 459 fprintf(stderr, "YNL: %s\n", yerr.msg); 460 return; 461 } 462 463 req = ethtool_rings_set_req_alloc(); 464 ethtool_rings_set_req_set_header_dev_index(req, ifindex); 465 ethtool_rings_set_req_set_tcp_data_split(req, 466 ETHTOOL_TCP_DATA_SPLIT_UNKNOWN); 467 if (config->_present.hds_thresh) 468 ethtool_rings_set_req_set_hds_thresh(req, config->hds_thresh); 469 470 ret = ethtool_rings_set(ys, req); 471 if (ret < 0) 472 fprintf(stderr, "YNL restoring HDS cfg: %s\n", ys->err.msg); 473 474 get_req = ethtool_rings_get_req_alloc(); 475 ethtool_rings_get_req_set_header_dev_index(get_req, ifindex); 476 get_rsp = ethtool_rings_get(ys, get_req); 477 ethtool_rings_get_req_free(get_req); 478 479 /* use explicit value if UKNOWN didn't give us the previous */ 480 if (get_rsp->tcp_data_split != 
config->tcp_data_split) { 481 ethtool_rings_set_req_set_tcp_data_split(req, 482 config->tcp_data_split); 483 ret = ethtool_rings_set(ys, req); 484 if (ret < 0) 485 fprintf(stderr, "YNL restoring expl HDS cfg: %s\n", 486 ys->err.msg); 487 } 488 489 ethtool_rings_get_rsp_free(get_rsp); 490 ethtool_rings_set_req_free(req); 491 492 ynl_sock_destroy(ys); 493 } 494 495 static int 496 configure_headersplit(const struct ethtool_rings_get_rsp *old, bool on) 497 { 498 struct ethtool_rings_get_req *get_req; 499 struct ethtool_rings_get_rsp *get_rsp; 500 struct ethtool_rings_set_req *req; 501 struct ynl_error yerr; 502 struct ynl_sock *ys; 503 int ret; 504 505 ys = ynl_sock_create(&ynl_ethtool_family, &yerr); 506 if (!ys) { 507 fprintf(stderr, "YNL: %s\n", yerr.msg); 508 return -1; 509 } 510 511 req = ethtool_rings_set_req_alloc(); 512 ethtool_rings_set_req_set_header_dev_index(req, ifindex); 513 if (on) { 514 ethtool_rings_set_req_set_tcp_data_split(req, 515 ETHTOOL_TCP_DATA_SPLIT_ENABLED); 516 if (old->_present.hds_thresh) 517 ethtool_rings_set_req_set_hds_thresh(req, 0); 518 } else { 519 ethtool_rings_set_req_set_tcp_data_split(req, 520 ETHTOOL_TCP_DATA_SPLIT_UNKNOWN); 521 } 522 ret = ethtool_rings_set(ys, req); 523 if (ret < 0) 524 fprintf(stderr, "YNL failed: %s\n", ys->err.msg); 525 ethtool_rings_set_req_free(req); 526 527 if (ret == 0) { 528 get_req = ethtool_rings_get_req_alloc(); 529 ethtool_rings_get_req_set_header_dev_index(get_req, ifindex); 530 get_rsp = ethtool_rings_get(ys, get_req); 531 ethtool_rings_get_req_free(get_req); 532 if (get_rsp) 533 fprintf(stderr, "TCP header split: %s\n", 534 tcp_data_split_str(get_rsp->tcp_data_split)); 535 ethtool_rings_get_rsp_free(get_rsp); 536 } 537 538 ynl_sock_destroy(ys); 539 540 return ret; 541 } 542 543 static int configure_rss(void) 544 { 545 return run_command("ethtool -X %s equal %d >&2", ifname, start_queue); 546 } 547 548 static void reset_rss(void) 549 { 550 run_command("ethtool -X %s default >&2", ifname, 
		    start_queue);
}

/* Try to set channel counts to rx/tx; while queues are bound to a dmabuf
 * this is EXPECTED to fail. If the set unexpectedly succeeds, the previous
 * channel configuration is put back. Returns non-zero when the set failed
 * (the expected outcome for a bound queue).
 */
static int check_changing_channels(unsigned int rx, unsigned int tx)
{
	struct ethtool_channels_get_req *gchan;
	struct ethtool_channels_set_req *schan;
	struct ethtool_channels_get_rsp *chan;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	fprintf(stderr, "setting channel count rx:%u tx:%u\n", rx, tx);

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	gchan = ethtool_channels_get_req_alloc();
	if (!gchan) {
		ret = -1;
		goto exit_close_sock;
	}

	ethtool_channels_get_req_set_header_dev_index(gchan, ifindex);
	chan = ethtool_channels_get(ys, gchan);
	ethtool_channels_get_req_free(gchan);
	if (!chan) {
		fprintf(stderr, "YNL get channels: %s\n", ys->err.msg);
		ret = -1;
		goto exit_close_sock;
	}

	schan = ethtool_channels_set_req_alloc();
	if (!schan) {
		ret = -1;
		goto exit_free_chan;
	}

	ethtool_channels_set_req_set_header_dev_index(schan, ifindex);

	if (chan->_present.combined_count) {
		/* Combined-channel device: zero any dedicated rx/tx counts,
		 * then express the requested rx/tx as combined channels plus
		 * a dedicated remainder on the larger side.
		 */
		if (chan->_present.rx_count || chan->_present.tx_count) {
			ethtool_channels_set_req_set_rx_count(schan, 0);
			ethtool_channels_set_req_set_tx_count(schan, 0);
		}

		if (rx == tx) {
			ethtool_channels_set_req_set_combined_count(schan, rx);
		} else if (rx > tx) {
			ethtool_channels_set_req_set_combined_count(schan, tx);
			ethtool_channels_set_req_set_rx_count(schan, rx - tx);
		} else {
			ethtool_channels_set_req_set_combined_count(schan, rx);
			ethtool_channels_set_req_set_tx_count(schan, tx - rx);
		}

	} else if (chan->_present.rx_count) {
		/* Separate rx/tx channel device: set both directly */
		ethtool_channels_set_req_set_rx_count(schan, rx);
		ethtool_channels_set_req_set_tx_count(schan, tx);
	} else {
		fprintf(stderr, "Error: device has neither combined nor rx channels\n");
		ret = -1;
		goto exit_free_schan;
	}

	ret = ethtool_channels_set(ys, schan);
	if (ret) {
		fprintf(stderr, "YNL set channels: %s\n", ys->err.msg);
	} else {
		/* We were expecting a failure, go back to previous settings */
		ethtool_channels_set_req_set_combined_count(schan,
							    chan->combined_count);
		ethtool_channels_set_req_set_rx_count(schan, chan->rx_count);
		ethtool_channels_set_req_set_tx_count(schan, chan->tx_count);

		ret = ethtool_channels_set(ys, schan);
		if (ret)
			fprintf(stderr, "YNL un-setting channels: %s\n",
				ys->err.msg);
	}

exit_free_schan:
	ethtool_channels_set_req_free(schan);
exit_free_chan:
	ethtool_channels_get_rsp_free(chan);
exit_close_sock:
	ynl_sock_destroy(ys);

	return ret;
}

/* Steer the test flow (server addr/port, optionally constrained to the
 * client) to start_queue: try a 5-tuple rule first, then a 3-tuple rule.
 */
static int configure_flow_steering(struct sockaddr_in6 *server_sin)
{
	const char *type = "tcp6";
	const char *server_addr;
	char buf[40];
	int flow_id;

	inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
	server_addr = buf;

	if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
		/* v4-mapped address: use tcp4 rules with the dotted-quad tail */
		type = "tcp4";
		server_addr = strrchr(server_addr, ':') + 1;
	}

	/* Try configure 5-tuple */
	flow_id = ethtool_add_flow("flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d",
				   type,
				   client_ip ? "src-ip" : "",
				   client_ip ?: "",
				   server_addr,
				   client_ip ? "src-port" : "",
				   client_ip ?
port : "", 667 port, start_queue); 668 if (flow_id < 0) { 669 /* If that fails, try configure 3-tuple */ 670 flow_id = ethtool_add_flow("flow-type %s dst-ip %s dst-port %s queue %d", 671 type, server_addr, port, start_queue); 672 if (flow_id < 0) 673 /* If that fails, return error */ 674 return -1; 675 } 676 677 return 0; 678 } 679 680 static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd, 681 struct netdev_queue_id *queues, 682 unsigned int n_queue_index, struct ynl_sock **ys) 683 { 684 struct netdev_bind_rx_req *req = NULL; 685 struct netdev_bind_rx_rsp *rsp = NULL; 686 struct ynl_error yerr; 687 688 *ys = ynl_sock_create(&ynl_netdev_family, &yerr); 689 if (!*ys) { 690 netdev_queue_id_free(queues); 691 fprintf(stderr, "YNL: %s\n", yerr.msg); 692 return -1; 693 } 694 695 req = netdev_bind_rx_req_alloc(); 696 netdev_bind_rx_req_set_ifindex(req, ifindex); 697 netdev_bind_rx_req_set_fd(req, dmabuf_fd); 698 __netdev_bind_rx_req_set_queues(req, queues, n_queue_index); 699 700 rsp = netdev_bind_rx(*ys, req); 701 if (!rsp) { 702 perror("netdev_bind_rx"); 703 goto err_close; 704 } 705 706 if (!rsp->_present.id) { 707 perror("id not present"); 708 goto err_close; 709 } 710 711 fprintf(stderr, "got dmabuf id=%d\n", rsp->id); 712 dmabuf_id = rsp->id; 713 714 netdev_bind_rx_req_free(req); 715 netdev_bind_rx_rsp_free(rsp); 716 717 return 0; 718 719 err_close: 720 fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg); 721 netdev_bind_rx_req_free(req); 722 ynl_sock_destroy(*ys); 723 return -1; 724 } 725 726 static int bind_tx_queue(unsigned int ifindex, unsigned int dmabuf_fd, 727 struct ynl_sock **ys) 728 { 729 struct netdev_bind_tx_req *req = NULL; 730 struct netdev_bind_tx_rsp *rsp = NULL; 731 struct ynl_error yerr; 732 733 *ys = ynl_sock_create(&ynl_netdev_family, &yerr); 734 if (!*ys) { 735 fprintf(stderr, "YNL: %s\n", yerr.msg); 736 return -1; 737 } 738 739 req = netdev_bind_tx_req_alloc(); 740 netdev_bind_tx_req_set_ifindex(req, ifindex); 741 
netdev_bind_tx_req_set_fd(req, dmabuf_fd); 742 743 rsp = netdev_bind_tx(*ys, req); 744 if (!rsp) { 745 perror("netdev_bind_tx"); 746 goto err_close; 747 } 748 749 if (!rsp->_present.id) { 750 perror("id not present"); 751 goto err_close; 752 } 753 754 fprintf(stderr, "got tx dmabuf id=%d\n", rsp->id); 755 tx_dmabuf_id = rsp->id; 756 757 netdev_bind_tx_req_free(req); 758 netdev_bind_tx_rsp_free(rsp); 759 760 return 0; 761 762 err_close: 763 fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg); 764 netdev_bind_tx_req_free(req); 765 ynl_sock_destroy(*ys); 766 return -1; 767 } 768 769 static int enable_reuseaddr(int fd) 770 { 771 int opt = 1; 772 int ret; 773 774 ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)); 775 if (ret) { 776 pr_err("SO_REUSEPORT failed"); 777 return -1; 778 } 779 780 ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); 781 if (ret) { 782 pr_err("SO_REUSEADDR failed"); 783 return -1; 784 } 785 786 return 0; 787 } 788 789 static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6) 790 { 791 int ret; 792 793 sin6->sin6_family = AF_INET6; 794 sin6->sin6_port = htons(port); 795 796 ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr); 797 if (ret != 1) { 798 /* fallback to plain IPv4 */ 799 ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]); 800 if (ret != 1) 801 return -1; 802 803 /* add ::ffff prefix */ 804 sin6->sin6_addr.s6_addr32[0] = 0; 805 sin6->sin6_addr.s6_addr32[1] = 0; 806 sin6->sin6_addr.s6_addr16[4] = 0; 807 sin6->sin6_addr.s6_addr16[5] = 0xffff; 808 } 809 810 return 0; 811 } 812 813 static struct netdev_queue_id *create_queues(void) 814 { 815 struct netdev_queue_id *queues; 816 size_t i = 0; 817 818 queues = netdev_queue_id_alloc(num_queues); 819 for (i = 0; i < num_queues; i++) { 820 netdev_queue_id_set_type(&queues[i], NETDEV_QUEUE_TYPE_RX); 821 netdev_queue_id_set_id(&queues[i], start_queue + i); 822 } 823 824 return queues; 825 } 826 827 static int do_server(struct 
memory_buffer *mem)
{
	/* RX side: enable header split, divert RSS, flow-steer the test flow
	 * to the bound queues, then accept one connection and receive devmem
	 * payload via MSG_SOCK_DEVMEM cmsgs, validating or dumping each frag.
	 * Returns 0 when the client disconnects cleanly, -1 on error.
	 */
	struct ethtool_rings_get_rsp *ring_config;
	char ctrl_data[sizeof(int) * 20000];
	size_t non_page_aligned_frags = 0;
	struct sockaddr_in6 client_addr;
	struct sockaddr_in6 server_sin;
	size_t page_aligned_frags = 0;
	size_t total_received = 0;
	socklen_t client_addr_len;
	bool is_devmem = false;
	char *tmp_mem = NULL;
	struct ynl_sock *ys;
	char iobuf[819200];
	int ret, err = -1;
	char buffer[256];
	int socket_fd;
	int client_fd;

	ret = parse_address(server_ip, atoi(port), &server_sin);
	if (ret < 0) {
		pr_err("parse server address");
		return -1;
	}

	ring_config = get_ring_config();
	if (!ring_config) {
		pr_err("Failed to get current ring configuration");
		return -1;
	}

	if (configure_headersplit(ring_config, 1)) {
		pr_err("Failed to enable TCP header split");
		goto err_free_ring_config;
	}

	/* Configure RSS to divert all traffic from our devmem queues */
	if (configure_rss()) {
		pr_err("Failed to configure rss");
		goto err_reset_headersplit;
	}

	/* Flow steer our devmem flows to start_queue */
	if (configure_flow_steering(&server_sin)) {
		pr_err("Failed to configure flow steering");
		goto err_reset_rss;
	}

	sleep(1);

	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys)) {
		pr_err("Failed to bind");
		goto err_reset_flow_steering;
	}

	/* Scratch buffer for CPU-side copies out of the dmabuf */
	tmp_mem = malloc(mem->size);
	if (!tmp_mem)
		goto err_unbind;

	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (socket_fd < 0) {
		pr_err("Failed to create socket");
		goto err_free_tmp;
	}

	if (enable_reuseaddr(socket_fd))
		goto err_close_socket;

	fprintf(stderr, "binding to address %s:%d\n", server_ip,
		ntohs(server_sin.sin6_port));

	ret = bind(socket_fd, &server_sin, sizeof(server_sin));
	if (ret) {
		pr_err("Failed to bind");
		goto err_close_socket;
	}

	ret = listen(socket_fd, 1);
	if (ret) {
		pr_err("Failed to listen");
		goto err_close_socket;
	}

	client_addr_len = sizeof(client_addr);

	inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
		  sizeof(buffer));
	/* Fix: message previously read "Waiting or connection" */
	fprintf(stderr, "Waiting for connection on %s:%d\n", buffer,
		ntohs(server_sin.sin6_port));
	client_fd = accept(socket_fd, &client_addr, &client_addr_len);
	if (client_fd < 0) {
		pr_err("Failed to accept");
		goto err_close_socket;
	}

	inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
		  sizeof(buffer));
	fprintf(stderr, "Got connection from %s:%d\n", buffer,
		ntohs(client_addr.sin6_port));

	while (1) {
		struct iovec iov = { .iov_base = iobuf,
				     .iov_len = sizeof(iobuf) };
		struct dmabuf_cmsg *dmabuf_cmsg = NULL;
		struct cmsghdr *cm = NULL;
		struct msghdr msg = { 0 };
		struct dmabuf_token token;
		ssize_t ret;

		is_devmem = false;

		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = ctrl_data;
		msg.msg_controllen = sizeof(ctrl_data);
		ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
		fprintf(stderr, "recvmsg ret=%ld\n", ret);
		if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
			continue;
		if (ret < 0) {
			perror("recvmsg");
			if (errno == EFAULT) {
				pr_err("received EFAULT, won't recover");
				goto err_close_client;
			}
			continue;
		}
		if (ret == 0) {
			/* Orderly shutdown by the peer: success */
			errno = 0;
			pr_err("client exited");
			goto cleanup;
		}

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level != SOL_SOCKET ||
			    (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
			     cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
				fprintf(stderr, "skipping non-devmem cmsg\n");
				continue;
			}

			dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
			is_devmem = true;

			if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
				/* TODO: process data copied from skb's linear
				 * buffer.
				 */
				fprintf(stderr,
					"SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
					dmabuf_cmsg->frag_size);

				continue;
			}

			token.token_start = dmabuf_cmsg->frag_token;
			token.token_count = 1;

			total_received += dmabuf_cmsg->frag_size;
			fprintf(stderr,
				"received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
				dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
				dmabuf_cmsg->frag_offset % getpagesize(),
				dmabuf_cmsg->frag_offset,
				dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
				total_received, dmabuf_cmsg->dmabuf_id);

			/* Frags must land on the dmabuf we bound */
			if (dmabuf_cmsg->dmabuf_id != dmabuf_id) {
				pr_err("received on wrong dmabuf_id: flow steering error");
				goto err_close_client;
			}

			if (dmabuf_cmsg->frag_size % getpagesize())
				non_page_aligned_frags++;
			else
				page_aligned_frags++;

			provider->memcpy_from_device(tmp_mem, mem,
						     dmabuf_cmsg->frag_offset,
						     dmabuf_cmsg->frag_size);

			if (do_validation) {
				if (validate_buffer(tmp_mem,
						    dmabuf_cmsg->frag_size))
					goto err_close_client;
			} else {
				print_nonzero_bytes(tmp_mem,
						    dmabuf_cmsg->frag_size);
			}

			/* Return the frag token so the kernel can reuse it */
			ret = setsockopt(client_fd, SOL_SOCKET,
					 SO_DEVMEM_DONTNEED, &token,
					 sizeof(token));
			if (ret != 1) {
				pr_err("SO_DEVMEM_DONTNEED not enough tokens");
				goto err_close_client;
			}
		}
		if (!is_devmem) {
			pr_err("flow steering error");
			goto err_close_client;
		}

		fprintf(stderr, "total_received=%lu\n", total_received);
	}

	fprintf(stderr, "%s: ok\n", TEST_PREFIX);

	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
		page_aligned_frags, non_page_aligned_frags);

cleanup:
	err = 0;

err_close_client:
	close(client_fd);
err_close_socket:
	close(socket_fd);
err_free_tmp:
	free(tmp_mem);
err_unbind:
ynl_sock_destroy(ys); 1049 err_reset_flow_steering: 1050 reset_flow_steering(); 1051 err_reset_rss: 1052 reset_rss(); 1053 err_reset_headersplit: 1054 restore_ring_config(ring_config); 1055 err_free_ring_config: 1056 ethtool_rings_get_rsp_free(ring_config); 1057 return err; 1058 } 1059 1060 int run_devmem_tests(void) 1061 { 1062 struct ethtool_rings_get_rsp *ring_config; 1063 struct netdev_queue_id *queues; 1064 struct memory_buffer *mem; 1065 struct ynl_sock *ys; 1066 int err = -1; 1067 1068 mem = provider->alloc(getpagesize() * NUM_PAGES); 1069 if (!mem) { 1070 pr_err("Failed to allocate memory buffer"); 1071 return -1; 1072 } 1073 1074 ring_config = get_ring_config(); 1075 if (!ring_config) { 1076 pr_err("Failed to get current ring configuration"); 1077 goto err_free_mem; 1078 } 1079 1080 /* Configure RSS to divert all traffic from our devmem queues */ 1081 if (configure_rss()) { 1082 pr_err("rss error"); 1083 goto err_free_ring_config; 1084 } 1085 1086 if (configure_headersplit(ring_config, 1)) { 1087 pr_err("Failed to configure header split"); 1088 goto err_reset_rss; 1089 } 1090 1091 queues = netdev_queue_id_alloc(num_queues); 1092 if (!queues) { 1093 pr_err("Failed to allocate empty queues array"); 1094 goto err_reset_headersplit; 1095 } 1096 1097 if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) { 1098 pr_err("Binding empty queues array should have failed"); 1099 goto err_unbind; 1100 } 1101 1102 if (configure_headersplit(ring_config, 0)) { 1103 pr_err("Failed to configure header split"); 1104 goto err_reset_headersplit; 1105 } 1106 1107 queues = create_queues(); 1108 if (!queues) { 1109 pr_err("Failed to create queues"); 1110 goto err_reset_headersplit; 1111 } 1112 1113 if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) { 1114 pr_err("Configure dmabuf with header split off should have failed"); 1115 goto err_unbind; 1116 } 1117 1118 if (configure_headersplit(ring_config, 1)) { 1119 pr_err("Failed to configure header split"); 1120 
goto err_reset_headersplit; 1121 } 1122 1123 queues = create_queues(); 1124 if (!queues) { 1125 pr_err("Failed to create queues"); 1126 goto err_reset_headersplit; 1127 } 1128 1129 if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) { 1130 pr_err("Failed to bind"); 1131 goto err_reset_headersplit; 1132 } 1133 1134 /* Deactivating a bound queue should not be legal */ 1135 if (!check_changing_channels(num_queues, num_queues)) { 1136 pr_err("Deactivating a bound queue should be illegal"); 1137 goto err_unbind; 1138 } 1139 1140 err = 0; 1141 goto err_unbind; 1142 1143 err_unbind: 1144 ynl_sock_destroy(ys); 1145 err_reset_headersplit: 1146 restore_ring_config(ring_config); 1147 err_reset_rss: 1148 reset_rss(); 1149 err_free_ring_config: 1150 ethtool_rings_get_rsp_free(ring_config); 1151 err_free_mem: 1152 provider->free(mem); 1153 return err; 1154 } 1155 1156 static uint64_t gettimeofday_ms(void) 1157 { 1158 struct timeval tv; 1159 1160 gettimeofday(&tv, NULL); 1161 return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000ULL); 1162 } 1163 1164 static int do_poll(int fd) 1165 { 1166 struct pollfd pfd; 1167 int ret; 1168 1169 pfd.revents = 0; 1170 pfd.fd = fd; 1171 1172 ret = poll(&pfd, 1, waittime_ms); 1173 if (ret == -1) { 1174 pr_err("poll"); 1175 return -1; 1176 } 1177 1178 return ret && (pfd.revents & POLLERR); 1179 } 1180 1181 static int wait_compl(int fd) 1182 { 1183 int64_t tstop = gettimeofday_ms() + waittime_ms; 1184 char control[CMSG_SPACE(100)] = {}; 1185 struct sock_extended_err *serr; 1186 struct msghdr msg = {}; 1187 struct cmsghdr *cm; 1188 __u32 hi, lo; 1189 int ret; 1190 1191 msg.msg_control = control; 1192 msg.msg_controllen = sizeof(control); 1193 1194 while (gettimeofday_ms() < tstop) { 1195 ret = do_poll(fd); 1196 if (ret < 0) 1197 return ret; 1198 if (!ret) 1199 continue; 1200 1201 ret = recvmsg(fd, &msg, MSG_ERRQUEUE); 1202 if (ret < 0) { 1203 if (errno == EAGAIN) 1204 continue; 1205 pr_err("recvmsg(MSG_ERRQUEUE)"); 1206 return -1; 1207 } 1208 
if (msg.msg_flags & MSG_CTRUNC) { 1209 pr_err("MSG_CTRUNC"); 1210 return -1; 1211 } 1212 1213 for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) { 1214 if (cm->cmsg_level != SOL_IP && 1215 cm->cmsg_level != SOL_IPV6) 1216 continue; 1217 if (cm->cmsg_level == SOL_IP && 1218 cm->cmsg_type != IP_RECVERR) 1219 continue; 1220 if (cm->cmsg_level == SOL_IPV6 && 1221 cm->cmsg_type != IPV6_RECVERR) 1222 continue; 1223 1224 serr = (void *)CMSG_DATA(cm); 1225 if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) { 1226 pr_err("wrong origin %u", serr->ee_origin); 1227 return -1; 1228 } 1229 if (serr->ee_errno != 0) { 1230 pr_err("wrong errno %d", serr->ee_errno); 1231 return -1; 1232 } 1233 1234 hi = serr->ee_data; 1235 lo = serr->ee_info; 1236 1237 fprintf(stderr, "tx complete [%d,%d]\n", lo, hi); 1238 return 0; 1239 } 1240 } 1241 1242 pr_err("did not receive tx completion"); 1243 return -1; 1244 } 1245 1246 static int do_client(struct memory_buffer *mem) 1247 { 1248 char ctrl_data[CMSG_SPACE(sizeof(__u32))]; 1249 struct sockaddr_in6 server_sin; 1250 struct sockaddr_in6 client_sin; 1251 struct ynl_sock *ys = NULL; 1252 struct iovec iov[MAX_IOV]; 1253 struct msghdr msg = {}; 1254 ssize_t line_size = 0; 1255 struct cmsghdr *cmsg; 1256 char *line = NULL; 1257 int ret, err = -1; 1258 size_t len = 0; 1259 int socket_fd; 1260 __u32 ddmabuf; 1261 int opt = 1; 1262 1263 ret = parse_address(server_ip, atoi(port), &server_sin); 1264 if (ret < 0) { 1265 pr_err("parse server address"); 1266 return -1; 1267 } 1268 1269 if (client_ip) { 1270 ret = parse_address(client_ip, atoi(port), &client_sin); 1271 if (ret < 0) { 1272 pr_err("parse client address"); 1273 return ret; 1274 } 1275 } 1276 1277 socket_fd = socket(AF_INET6, SOCK_STREAM, 0); 1278 if (socket_fd < 0) { 1279 pr_err("create socket"); 1280 return -1; 1281 } 1282 1283 if (enable_reuseaddr(socket_fd)) 1284 goto err_close_socket; 1285 1286 ret = setsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE, ifname, 1287 strlen(ifname) + 1); 
1288 if (ret) { 1289 pr_err("bindtodevice"); 1290 goto err_close_socket; 1291 } 1292 1293 if (bind_tx_queue(ifindex, mem->fd, &ys)) { 1294 pr_err("Failed to bind"); 1295 goto err_close_socket; 1296 } 1297 1298 if (client_ip) { 1299 ret = bind(socket_fd, &client_sin, sizeof(client_sin)); 1300 if (ret) { 1301 pr_err("bind"); 1302 goto err_unbind; 1303 } 1304 } 1305 1306 ret = setsockopt(socket_fd, SOL_SOCKET, SO_ZEROCOPY, &opt, sizeof(opt)); 1307 if (ret) { 1308 pr_err("set sock opt"); 1309 goto err_unbind; 1310 } 1311 1312 fprintf(stderr, "Connect to %s %d (via %s)\n", server_ip, 1313 ntohs(server_sin.sin6_port), ifname); 1314 1315 ret = connect(socket_fd, &server_sin, sizeof(server_sin)); 1316 if (ret) { 1317 pr_err("connect"); 1318 goto err_unbind; 1319 } 1320 1321 while (1) { 1322 free(line); 1323 line = NULL; 1324 line_size = getline(&line, &len, stdin); 1325 1326 if (line_size < 0) 1327 break; 1328 1329 if (max_chunk) { 1330 msg.msg_iovlen = 1331 (line_size + max_chunk - 1) / max_chunk; 1332 if (msg.msg_iovlen > MAX_IOV) { 1333 pr_err("can't partition %zd bytes into maximum of %d chunks", 1334 line_size, MAX_IOV); 1335 goto err_free_line; 1336 } 1337 1338 for (int i = 0; i < msg.msg_iovlen; i++) { 1339 iov[i].iov_base = (void *)(i * max_chunk); 1340 iov[i].iov_len = max_chunk; 1341 } 1342 1343 iov[msg.msg_iovlen - 1].iov_len = 1344 line_size - (msg.msg_iovlen - 1) * max_chunk; 1345 } else { 1346 iov[0].iov_base = 0; 1347 iov[0].iov_len = line_size; 1348 msg.msg_iovlen = 1; 1349 } 1350 1351 msg.msg_iov = iov; 1352 provider->memcpy_to_device(mem, 0, line, line_size); 1353 1354 msg.msg_control = ctrl_data; 1355 msg.msg_controllen = sizeof(ctrl_data); 1356 1357 cmsg = CMSG_FIRSTHDR(&msg); 1358 cmsg->cmsg_level = SOL_SOCKET; 1359 cmsg->cmsg_type = SCM_DEVMEM_DMABUF; 1360 cmsg->cmsg_len = CMSG_LEN(sizeof(__u32)); 1361 1362 ddmabuf = tx_dmabuf_id; 1363 1364 *((__u32 *)CMSG_DATA(cmsg)) = ddmabuf; 1365 1366 ret = sendmsg(socket_fd, &msg, MSG_ZEROCOPY); 1367 if (ret < 0) 
{ 1368 pr_err("Failed sendmsg"); 1369 goto err_free_line; 1370 } 1371 1372 fprintf(stderr, "sendmsg_ret=%d\n", ret); 1373 1374 if (ret != line_size) { 1375 pr_err("Did not send all bytes %d vs %zd", ret, line_size); 1376 goto err_free_line; 1377 } 1378 1379 if (wait_compl(socket_fd)) 1380 goto err_free_line; 1381 } 1382 1383 fprintf(stderr, "%s: tx ok\n", TEST_PREFIX); 1384 1385 err = 0; 1386 1387 err_free_line: 1388 free(line); 1389 err_unbind: 1390 ynl_sock_destroy(ys); 1391 err_close_socket: 1392 close(socket_fd); 1393 return err; 1394 } 1395 1396 int main(int argc, char *argv[]) 1397 { 1398 struct memory_buffer *mem; 1399 int is_server = 0, opt; 1400 int ret, err = 1; 1401 1402 while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:z:")) != -1) { 1403 switch (opt) { 1404 case 'l': 1405 is_server = 1; 1406 break; 1407 case 's': 1408 server_ip = optarg; 1409 break; 1410 case 'c': 1411 client_ip = optarg; 1412 break; 1413 case 'p': 1414 port = optarg; 1415 break; 1416 case 'v': 1417 do_validation = atoll(optarg); 1418 break; 1419 case 'q': 1420 num_queues = atoi(optarg); 1421 break; 1422 case 't': 1423 start_queue = atoi(optarg); 1424 break; 1425 case 'f': 1426 ifname = optarg; 1427 break; 1428 case 'z': 1429 max_chunk = atoi(optarg); 1430 break; 1431 case '?': 1432 fprintf(stderr, "unknown option: %c\n", optopt); 1433 break; 1434 } 1435 } 1436 1437 if (!ifname) { 1438 pr_err("Missing -f argument"); 1439 return 1; 1440 } 1441 1442 ifindex = if_nametoindex(ifname); 1443 1444 fprintf(stderr, "using ifindex=%u\n", ifindex); 1445 1446 if (!server_ip && !client_ip) { 1447 if (start_queue < 0 && num_queues < 0) { 1448 num_queues = rxq_num(ifindex); 1449 if (num_queues < 0) { 1450 pr_err("couldn't detect number of queues"); 1451 return 1; 1452 } 1453 if (num_queues < 2) { 1454 pr_err("number of device queues is too low"); 1455 return 1; 1456 } 1457 /* make sure can bind to multiple queues */ 1458 start_queue = num_queues / 2; 1459 num_queues /= 2; 1460 } 1461 1462 if 
(start_queue < 0 || num_queues < 0) { 1463 pr_err("Both -t and -q are required"); 1464 return 1; 1465 } 1466 1467 return run_devmem_tests(); 1468 } 1469 1470 if (start_queue < 0 && num_queues < 0) { 1471 num_queues = rxq_num(ifindex); 1472 if (num_queues < 2) { 1473 pr_err("number of device queues is too low"); 1474 return 1; 1475 } 1476 1477 num_queues = 1; 1478 start_queue = rxq_num(ifindex) - num_queues; 1479 1480 if (start_queue < 0) { 1481 pr_err("couldn't detect number of queues"); 1482 return 1; 1483 } 1484 1485 fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues); 1486 } 1487 1488 for (; optind < argc; optind++) 1489 fprintf(stderr, "extra arguments: %s\n", argv[optind]); 1490 1491 if (start_queue < 0) { 1492 pr_err("Missing -t argument"); 1493 return 1; 1494 } 1495 1496 if (num_queues < 0) { 1497 pr_err("Missing -q argument"); 1498 return 1; 1499 } 1500 1501 if (!server_ip) { 1502 pr_err("Missing -s argument"); 1503 return 1; 1504 } 1505 1506 if (!port) { 1507 pr_err("Missing -p argument"); 1508 return 1; 1509 } 1510 1511 mem = provider->alloc(getpagesize() * NUM_PAGES); 1512 if (!mem) { 1513 pr_err("Failed to allocate memory buffer"); 1514 return 1; 1515 } 1516 1517 ret = is_server ? do_server(mem) : do_client(mem); 1518 if (ret) 1519 goto err_free_mem; 1520 1521 err = 0; 1522 1523 err_free_mem: 1524 provider->free(mem); 1525 return err; 1526 } 1527