// SPDX-License-Identifier: GPL-2.0
/*
 * tcpdevmem netcat. Works similarly to netcat but does device memory TCP
 * instead of regular TCP. Uses udmabuf to mock a dmabuf provider.
 *
 * Usage:
 *
 *	On server:
 *	ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201
 *
 *	On client:
 *	echo -n "hello\nworld" | \
 *		ncdevmem -s <server IP> [-c <client IP>] -p 5201 -f eth1
 *
 * Note this is compatible with regular netcat. i.e. the sender or receiver can
 * be replaced with regular netcat to test the RX or TX path in isolation.
 *
 * Test data validation (devmem TCP on RX only):
 *
 *	On server:
 *	ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201 -v 7
 *
 *	On client:
 *	yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
 *		head -c 1G | \
 *		nc <server IP> 5201 -p 5201
 *
 * Test data validation (devmem TCP on RX and TX, validation happens on RX):
 *
 *	On server:
 *	ncdevmem -s <server IP> [-c <client IP>] -l -p 5201 -v 8 -f eth1
 *
 *	On client:
 *	yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06\\x07) | \
 *		head -c 1M | \
 *		ncdevmem -s <server IP> [-c <client IP>] -p 5201 -f eth1
 */
#define _GNU_SOURCE
#define __EXPORTED_HEADERS__

#include <linux/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#define __iovec_defined
#include <fcntl.h>
#include <malloc.h>
#include <error.h>
#include <poll.h>

#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/time.h>

#include <linux/memfd.h>
#include <linux/dma-buf.h>
#include <linux/errqueue.h>
#include <linux/udmabuf.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/netdev.h>
#include <linux/ethtool_netlink.h>
#include <time.h>
#include <net/if.h>

#include "netdev-user.h"
#include "ethtool-user.h"
#include <ynl.h>

#define PAGE_SHIFT 12
#define TEST_PREFIX "ncdevmem"
#define NUM_PAGES 16000

#ifndef MSG_SOCK_DEVMEM
#define MSG_SOCK_DEVMEM 0x2000000
#endif

#define MAX_IOV 1024

static size_t max_chunk;
static char *server_ip;
static char *client_ip;
static char *port;
static size_t do_validation;
static int start_queue = -1;
static int num_queues = -1;
static char *ifname;
static unsigned int ifindex;
static unsigned int dmabuf_id;
static uint32_t tx_dmabuf_id;
static int waittime_ms = 500;

struct memory_buffer {
	int fd;
	size_t size;

	int devfd;
	int memfd;
	char *buf_mem;
};

struct memory_provider {
	struct memory_buffer *(*alloc)(size_t size);
	void (*free)(struct memory_buffer *ctx);
	void (*memcpy_to_device)(struct memory_buffer *dst, size_t off,
				 void *src, int n);
	void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
				   size_t off, int n);
};

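/*
 * Buffer management goes through the small provider abstraction above so the
 * dmabuf source can be swapped out. The udmabuf provider below mocks a dmabuf
 * exporter by backing the buffer with a sealed memfd.
 */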
static struct memory_buffer *udmabuf_alloc(size_t size)
{
	struct udmabuf_create create;
	struct memory_buffer *ctx;
	int ret;

	ctx = malloc(sizeof(*ctx));
	if (!ctx)
		error(1, ENOMEM, "malloc failed");

	ctx->size = size;

	ctx->devfd = open("/dev/udmabuf", O_RDWR);
	if (ctx->devfd < 0)
		error(1, errno,
		      "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
		      TEST_PREFIX);

	ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
	if (ctx->memfd < 0)
		error(1, errno, "%s: [skip,no-memfd]\n", TEST_PREFIX);

	ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
	if (ret < 0)
		error(1, errno, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);

	ret = ftruncate(ctx->memfd, size);
	if (ret == -1)
		error(1, errno, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);

	memset(&create, 0, sizeof(create));

	create.memfd = ctx->memfd;
	create.offset = 0;
	create.size = size;
	ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
	if (ctx->fd < 0)
		error(1, errno, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);

	ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    ctx->fd, 0);
	if (ctx->buf_mem == MAP_FAILED)
		error(1, errno, "%s: [FAIL, map udmabuf]\n", TEST_PREFIX);

	return ctx;
}

static void udmabuf_free(struct memory_buffer *ctx)
{
	munmap(ctx->buf_mem, ctx->size);
	close(ctx->fd);
	close(ctx->memfd);
	close(ctx->devfd);
	free(ctx);
}

static void udmabuf_memcpy_to_device(struct memory_buffer *dst, size_t off,
				     void *src, int n)
{
	struct dma_buf_sync sync = {};

	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(dst->fd, DMA_BUF_IOCTL_SYNC, &sync);

	memcpy(dst->buf_mem + off, src, n);

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(dst->fd, DMA_BUF_IOCTL_SYNC, &sync);
}

static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src,
				       size_t off, int n)
{
	struct dma_buf_sync sync = {};

	sync.flags = DMA_BUF_SYNC_START;
	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);

	memcpy(dst, src->buf_mem + off, n);

	sync.flags = DMA_BUF_SYNC_END;
	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
}

static struct memory_provider udmabuf_memory_provider = {
	.alloc = udmabuf_alloc,
	.free = udmabuf_free,
	.memcpy_to_device = udmabuf_memcpy_to_device,
	.memcpy_from_device = udmabuf_memcpy_from_device,
};

static struct memory_provider *provider = &udmabuf_memory_provider;

static void print_nonzero_bytes(void *ptr, size_t size)
{
	unsigned char *p = ptr;
	unsigned int i;

	for (i = 0; i < size; i++)
		putchar(p[i]);
}

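/*
 * Validate a received chunk against the repeating pattern generated by the
 * sender in the usage examples above: bytes 1..(do_validation - 1) followed
 * by '\n' (the newline appended by "yes"). -v must therefore be set to the
 * pattern length including the newline, e.g. -v 7 for \x01..\x06.
 */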
void validate_buffer(void *line, size_t size)
{
	static unsigned char seed = 1;
	unsigned char *ptr = line;
	unsigned char expected;
	static int errors;
	size_t i;

	for (i = 0; i < size; i++) {
		expected = seed ? seed : '\n';
		if (ptr[i] != expected) {
			fprintf(stderr,
				"Failed validation: expected=%u, actual=%u, index=%lu\n",
				expected, ptr[i], i);
			errors++;
			if (errors > 20)
				error(1, 0, "validation failed.");
		}
		seed++;
		if (seed == do_validation)
			seed = 0;
	}

	fprintf(stdout, "Validated buffer\n");
}

static int rxq_num(int ifindex)
{
	struct ethtool_channels_get_req *req;
	struct ethtool_channels_get_rsp *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int num = -1;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = ethtool_channels_get_req_alloc();
	ethtool_channels_get_req_set_header_dev_index(req, ifindex);
	rsp = ethtool_channels_get(ys, req);
	if (rsp)
		num = rsp->rx_count + rsp->combined_count;
	ethtool_channels_get_req_free(req);
	ethtool_channels_get_rsp_free(rsp);

	ynl_sock_destroy(ys);

	return num;
}

#define run_command(cmd, ...)						\
	({								\
		char command[256];					\
		memset(command, 0, sizeof(command));			\
		snprintf(command, sizeof(command), cmd, ##__VA_ARGS__);	\
		fprintf(stderr, "Running: %s\n", command);		\
		system(command);					\
	})

static int reset_flow_steering(void)
{
	/* Depending on the NIC, toggling ntuple off and on might not
	 * be allowed. Additionally, attempting to delete existing filters
	 * will fail if no filters are present. Therefore, do not enforce
	 * the exit status.
	 */

	run_command("sudo ethtool -K %s ntuple off >&2", ifname);
	run_command("sudo ethtool -K %s ntuple on >&2", ifname);
	run_command(
		"sudo ethtool -n %s | grep 'Filter:' | awk '{print $2}' | xargs -n1 ethtool -N %s delete >&2",
		ifname, ifname);
	return 0;
}

static const char *tcp_data_split_str(int val)
{
	switch (val) {
	case 0:
		return "off";
	case 1:
		return "auto";
	case 2:
		return "on";
	default:
		return "?";
	}
}

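/*
 * Devmem RX requires header split (tcp-data-split): protocol headers must
 * land in host memory while the payload is placed in the bound dmabuf.
 * run_devmem_tests() below relies on binding failing when this is off.
 */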
static int configure_headersplit(bool on)
{
	struct ethtool_rings_get_req *get_req;
	struct ethtool_rings_get_rsp *get_rsp;
	struct ethtool_rings_set_req *req;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = ethtool_rings_set_req_alloc();
	ethtool_rings_set_req_set_header_dev_index(req, ifindex);
	/* 0 - off, 1 - auto, 2 - on */
	ethtool_rings_set_req_set_tcp_data_split(req, on ? 2 : 0);
	ret = ethtool_rings_set(ys, req);
	if (ret < 0)
		fprintf(stderr, "YNL failed: %s\n", ys->err.msg);
	ethtool_rings_set_req_free(req);

	if (ret == 0) {
		get_req = ethtool_rings_get_req_alloc();
		ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
		get_rsp = ethtool_rings_get(ys, get_req);
		ethtool_rings_get_req_free(get_req);
		if (get_rsp)
			fprintf(stderr, "TCP header split: %s\n",
				tcp_data_split_str(get_rsp->tcp_data_split));
		ethtool_rings_get_rsp_free(get_rsp);
	}

	ynl_sock_destroy(ys);

	return ret;
}

static int configure_rss(void)
{
	return run_command("sudo ethtool -X %s equal %d >&2", ifname, start_queue);
}

static int configure_channels(unsigned int rx, unsigned int tx)
{
	return run_command("sudo ethtool -L %s rx %u tx %u", ifname, rx, tx);
}

static int configure_flow_steering(struct sockaddr_in6 *server_sin)
{
	const char *type = "tcp6";
	const char *server_addr;
	char buf[40];

	inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
	server_addr = buf;

	if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
		type = "tcp4";
		server_addr = strrchr(server_addr, ':') + 1;
	}

	return run_command("sudo ethtool -N %s flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d >&2",
			   ifname,
			   type,
			   client_ip ? "src-ip" : "",
			   client_ip ?: "",
			   server_addr,
			   client_ip ? "src-port" : "",
			   client_ip ? port : "",
			   port, start_queue);
}

static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
			 struct netdev_queue_id *queues,
			 unsigned int n_queue_index, struct ynl_sock **ys)
{
	struct netdev_bind_rx_req *req = NULL;
	struct netdev_bind_rx_rsp *rsp = NULL;
	struct ynl_error yerr;

	*ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!*ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = netdev_bind_rx_req_alloc();
	netdev_bind_rx_req_set_ifindex(req, ifindex);
	netdev_bind_rx_req_set_fd(req, dmabuf_fd);
	__netdev_bind_rx_req_set_queues(req, queues, n_queue_index);

	rsp = netdev_bind_rx(*ys, req);
	if (!rsp) {
		perror("netdev_bind_rx");
		goto err_close;
	}

	if (!rsp->_present.id) {
		perror("id not present");
		goto err_close;
	}

	fprintf(stderr, "got dmabuf id=%d\n", rsp->id);
	dmabuf_id = rsp->id;

	netdev_bind_rx_req_free(req);
	netdev_bind_rx_rsp_free(rsp);

	return 0;

err_close:
	fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
	netdev_bind_rx_req_free(req);
	ynl_sock_destroy(*ys);
	return -1;
}

static int bind_tx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
			 struct ynl_sock **ys)
{
	struct netdev_bind_tx_req *req = NULL;
	struct netdev_bind_tx_rsp *rsp = NULL;
	struct ynl_error yerr;

	*ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!*ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = netdev_bind_tx_req_alloc();
	netdev_bind_tx_req_set_ifindex(req, ifindex);
	netdev_bind_tx_req_set_fd(req, dmabuf_fd);

	rsp = netdev_bind_tx(*ys, req);
	if (!rsp) {
		perror("netdev_bind_tx");
		goto err_close;
	}

	if (!rsp->_present.id) {
		perror("id not present");
		goto err_close;
	}

	fprintf(stderr, "got tx dmabuf id=%d\n", rsp->id);
	tx_dmabuf_id = rsp->id;

	netdev_bind_tx_req_free(req);
	netdev_bind_tx_rsp_free(rsp);

	return 0;

err_close:
	fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
	netdev_bind_tx_req_free(req);
	ynl_sock_destroy(*ys);
	return -1;
}

static void enable_reuseaddr(int fd)
{
	int opt = 1;
	int ret;

	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
	if (ret)
		error(1, errno, "%s: [FAIL, SO_REUSEPORT]\n", TEST_PREFIX);

	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
	if (ret)
		error(1, errno, "%s: [FAIL, SO_REUSEADDR]\n", TEST_PREFIX);
}

static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
{
	int ret;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_port = htons(port);

	ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr);
	if (ret != 1) {
		/* fallback to plain IPv4 */
		ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]);
		if (ret != 1)
			return -1;

		/* add ::ffff prefix */
		sin6->sin6_addr.s6_addr32[0] = 0;
		sin6->sin6_addr.s6_addr32[1] = 0;
		sin6->sin6_addr.s6_addr16[4] = 0;
		sin6->sin6_addr.s6_addr16[5] = 0xffff;
	}

	return 0;
}

static struct netdev_queue_id *create_queues(void)
{
	struct netdev_queue_id *queues;
	size_t i = 0;

	queues = calloc(num_queues, sizeof(*queues));
	for (i = 0; i < num_queues; i++) {
		queues[i]._present.type = 1;
		queues[i]._present.id = 1;
		queues[i].type = NETDEV_QUEUE_TYPE_RX;
		queues[i].id = start_queue + i;
	}

	return queues;
}

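/*
 * RX side: steer the test flow to the dmabuf-bound queues, then receive with
 * recvmsg(MSG_SOCK_DEVMEM). Payload is not copied into iobuf; instead each
 * SCM_DEVMEM_DMABUF cmsg describes a fragment (offset, size, token) inside
 * the bound dmabuf. Fragments are copied out through the memory provider for
 * validation/printing and then released back with SO_DEVMEM_DONTNEED.
 */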
static int do_server(struct memory_buffer *mem)
{
	char ctrl_data[sizeof(int) * 20000];
	struct netdev_queue_id *queues;
	size_t non_page_aligned_frags = 0;
	struct sockaddr_in6 client_addr;
	struct sockaddr_in6 server_sin;
	size_t page_aligned_frags = 0;
	size_t total_received = 0;
	socklen_t client_addr_len;
	bool is_devmem = false;
	char *tmp_mem = NULL;
	struct ynl_sock *ys;
	char iobuf[819200];
	char buffer[256];
	int socket_fd;
	int client_fd;
	int ret;

	ret = parse_address(server_ip, atoi(port), &server_sin);
	if (ret < 0)
		error(1, 0, "parse server address");

	if (reset_flow_steering())
		error(1, 0, "Failed to reset flow steering\n");

	if (configure_headersplit(1))
		error(1, 0, "Failed to enable TCP header split\n");

	/* Configure RSS to divert all traffic from our devmem queues */
	if (configure_rss())
		error(1, 0, "Failed to configure rss\n");

	/* Flow steer our devmem flows to start_queue */
	if (configure_flow_steering(&server_sin))
		error(1, 0, "Failed to configure flow steering\n");

	sleep(1);

	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
		error(1, 0, "Failed to bind\n");

	tmp_mem = malloc(mem->size);
	if (!tmp_mem)
		error(1, ENOMEM, "malloc failed");

	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (socket_fd < 0)
		error(1, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);

	enable_reuseaddr(socket_fd);

	fprintf(stderr, "binding to address %s:%d\n", server_ip,
		ntohs(server_sin.sin6_port));

	ret = bind(socket_fd, &server_sin, sizeof(server_sin));
	if (ret)
		error(1, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);

	ret = listen(socket_fd, 1);
	if (ret)
		error(1, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);

	client_addr_len = sizeof(client_addr);

	inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
		  sizeof(buffer));
	fprintf(stderr, "Waiting for connection on %s:%d\n", buffer,
		ntohs(server_sin.sin6_port));
	client_fd = accept(socket_fd, &client_addr, &client_addr_len);

	inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
		  sizeof(buffer));
	fprintf(stderr, "Got connection from %s:%d\n", buffer,
		ntohs(client_addr.sin6_port));

	while (1) {
		struct iovec iov = { .iov_base = iobuf,
				     .iov_len = sizeof(iobuf) };
		struct dmabuf_cmsg *dmabuf_cmsg = NULL;
		struct cmsghdr *cm = NULL;
		struct msghdr msg = { 0 };
		struct dmabuf_token token;
		ssize_t ret;

		is_devmem = false;

		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = ctrl_data;
		msg.msg_controllen = sizeof(ctrl_data);
		ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
		fprintf(stderr, "recvmsg ret=%ld\n", ret);
		if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
			continue;
		if (ret < 0) {
			perror("recvmsg");
			continue;
		}
		if (ret == 0) {
			fprintf(stderr, "client exited\n");
			goto cleanup;
		}

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level != SOL_SOCKET ||
			    (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
			     cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
				fprintf(stderr, "skipping non-devmem cmsg\n");
				continue;
			}

			dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
			is_devmem = true;

			if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
				/* TODO: process data copied from skb's linear
				 * buffer.
				 */
				fprintf(stderr,
					"SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
					dmabuf_cmsg->frag_size);

				continue;
			}

			token.token_start = dmabuf_cmsg->frag_token;
			token.token_count = 1;

			total_received += dmabuf_cmsg->frag_size;
			fprintf(stderr,
				"received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
				dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
				dmabuf_cmsg->frag_offset % getpagesize(),
				dmabuf_cmsg->frag_offset,
				dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
				total_received, dmabuf_cmsg->dmabuf_id);

			if (dmabuf_cmsg->dmabuf_id != dmabuf_id)
				error(1, 0,
				      "received on wrong dmabuf_id: flow steering error\n");

			if (dmabuf_cmsg->frag_size % getpagesize())
				non_page_aligned_frags++;
			else
				page_aligned_frags++;

			provider->memcpy_from_device(tmp_mem, mem,
						     dmabuf_cmsg->frag_offset,
						     dmabuf_cmsg->frag_size);

			if (do_validation)
				validate_buffer(tmp_mem,
						dmabuf_cmsg->frag_size);
			else
				print_nonzero_bytes(tmp_mem,
						    dmabuf_cmsg->frag_size);

			ret = setsockopt(client_fd, SOL_SOCKET,
					 SO_DEVMEM_DONTNEED, &token,
					 sizeof(token));
			if (ret != 1)
				error(1, 0,
				      "SO_DEVMEM_DONTNEED not enough tokens");
		}
		if (!is_devmem)
			error(1, 0, "flow steering error\n");

		fprintf(stderr, "total_received=%lu\n", total_received);
	}

	fprintf(stderr, "%s: ok\n", TEST_PREFIX);

	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
		page_aligned_frags, non_page_aligned_frags);

cleanup:

	free(tmp_mem);
	close(client_fd);
	close(socket_fd);
	ynl_sock_destroy(ys);

	return 0;
}

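/*
 * Sanity checks of the binding API itself (no data transfer): binding an
 * empty queues array must fail, binding with header split disabled must
 * fail, and deactivating a bound queue must fail. Closing the netlink
 * socket performs an implicit unbind.
 */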
void run_devmem_tests(void)
{
	struct memory_buffer *mem;
	struct ynl_sock *ys;

	mem = provider->alloc(getpagesize() * NUM_PAGES);

	/* Configure RSS to divert all traffic from our devmem queues */
	if (configure_rss())
		error(1, 0, "rss error\n");

	if (configure_headersplit(1))
		error(1, 0, "Failed to configure header split\n");

	if (!bind_rx_queue(ifindex, mem->fd,
			   calloc(num_queues, sizeof(struct netdev_queue_id)),
			   num_queues, &ys))
		error(1, 0, "Binding empty queues array should have failed\n");

	if (configure_headersplit(0))
		error(1, 0, "Failed to configure header split\n");

	if (!bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
		error(1, 0, "Configure dmabuf with header split off should have failed\n");

	if (configure_headersplit(1))
		error(1, 0, "Failed to configure header split\n");

	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys))
		error(1, 0, "Failed to bind\n");

	/* Deactivating a bound queue should not be legal */
	if (!configure_channels(num_queues, num_queues - 1))
		error(1, 0, "Deactivating a bound queue should be illegal.\n");

	/* Closing the netlink socket does an implicit unbind */
	ynl_sock_destroy(ys);

	provider->free(mem);
}

static uint64_t gettimeofday_ms(void)
{
	struct timeval tv;

	gettimeofday(&tv, NULL);
	return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000ULL);
}

static int do_poll(int fd)
{
	struct pollfd pfd;
	int ret;

	pfd.revents = 0;
	pfd.fd = fd;

	ret = poll(&pfd, 1, waittime_ms);
	if (ret == -1)
		error(1, errno, "poll");

	return ret && (pfd.revents & POLLERR);
}

static void wait_compl(int fd)
{
	int64_t tstop = gettimeofday_ms() + waittime_ms;
	char control[CMSG_SPACE(100)] = {};
	struct sock_extended_err *serr;
	struct msghdr msg = {};
	struct cmsghdr *cm;
	__u32 hi, lo;
	int ret;

	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	while (gettimeofday_ms() < tstop) {
		if (!do_poll(fd))
			continue;

		ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			error(1, errno, "recvmsg(MSG_ERRQUEUE)");
			return;
		}
		if (msg.msg_flags & MSG_CTRUNC)
			error(1, 0, "MSG_CTRUNC\n");

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level != SOL_IP &&
			    cm->cmsg_level != SOL_IPV6)
				continue;
			if (cm->cmsg_level == SOL_IP &&
			    cm->cmsg_type != IP_RECVERR)
				continue;
			if (cm->cmsg_level == SOL_IPV6 &&
			    cm->cmsg_type != IPV6_RECVERR)
				continue;

			serr = (void *)CMSG_DATA(cm);
			if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
				error(1, 0, "wrong origin %u", serr->ee_origin);
			if (serr->ee_errno != 0)
				error(1, 0, "wrong errno %d", serr->ee_errno);

			hi = serr->ee_data;
			lo = serr->ee_info;

			fprintf(stderr, "tx complete [%d,%d]\n", lo, hi);
			return;
		}
	}

	error(1, 0, "did not receive tx completion");
}

/*
 * TX side: the iovecs passed to sendmsg() do not point at process memory
 * here. With the SCM_DEVMEM_DMABUF cmsg attached, iov_base is interpreted as
 * a byte offset into the dmabuf bound via bind_tx_queue(), so the payload is
 * first staged into the dmabuf with memcpy_to_device(). MSG_ZEROCOPY
 * completions are then read from the error queue in wait_compl().
 */
static int do_client(struct memory_buffer *mem)
{
	char ctrl_data[CMSG_SPACE(sizeof(__u32))];
	struct sockaddr_in6 server_sin;
	struct sockaddr_in6 client_sin;
	struct ynl_sock *ys = NULL;
	struct iovec iov[MAX_IOV];
	struct msghdr msg = {};
	ssize_t line_size = 0;
	struct cmsghdr *cmsg;
	char *line = NULL;
	unsigned long mid;
	size_t len = 0;
	int socket_fd;
	__u32 ddmabuf;
	int opt = 1;
	int ret;

	ret = parse_address(server_ip, atoi(port), &server_sin);
	if (ret < 0)
		error(1, 0, "parse server address");

	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (socket_fd < 0)
		error(1, errno, "create socket");

	enable_reuseaddr(socket_fd);

	ret = setsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
			 strlen(ifname) + 1);
	if (ret)
		error(1, errno, "bindtodevice");

	if (bind_tx_queue(ifindex, mem->fd, &ys))
		error(1, 0, "Failed to bind\n");

	if (client_ip) {
		ret = parse_address(client_ip, atoi(port), &client_sin);
		if (ret < 0)
			error(1, 0, "parse client address");

		ret = bind(socket_fd, &client_sin, sizeof(client_sin));
		if (ret)
			error(1, errno, "bind");
	}

	ret = setsockopt(socket_fd, SOL_SOCKET, SO_ZEROCOPY, &opt, sizeof(opt));
	if (ret)
		error(1, errno, "set sock opt");

	fprintf(stderr, "Connect to %s %d (via %s)\n", server_ip,
		ntohs(server_sin.sin6_port), ifname);

	ret = connect(socket_fd, &server_sin, sizeof(server_sin));
	if (ret)
		error(1, errno, "connect");

	while (1) {
		free(line);
		line = NULL;
		line_size = getline(&line, &len, stdin);

		if (line_size < 0)
			break;

		if (max_chunk) {
			msg.msg_iovlen =
				(line_size + max_chunk - 1) / max_chunk;
			if (msg.msg_iovlen > MAX_IOV)
				error(1, 0,
				      "can't partition %zd bytes into maximum of %d chunks",
				      line_size, MAX_IOV);

			for (int i = 0; i < msg.msg_iovlen; i++) {
				iov[i].iov_base = (void *)(i * max_chunk);
				iov[i].iov_len = max_chunk;
			}

			iov[msg.msg_iovlen - 1].iov_len =
				line_size - (msg.msg_iovlen - 1) * max_chunk;
		} else {
			iov[0].iov_base = 0;
			iov[0].iov_len = line_size;
			msg.msg_iovlen = 1;
		}

		msg.msg_iov = iov;
		provider->memcpy_to_device(mem, 0, line, line_size);

		msg.msg_control = ctrl_data;
		msg.msg_controllen = sizeof(ctrl_data);

		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_DEVMEM_DMABUF;
		cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));

		ddmabuf = tx_dmabuf_id;

		*((__u32 *)CMSG_DATA(cmsg)) = ddmabuf;

		ret = sendmsg(socket_fd, &msg, MSG_ZEROCOPY);
		if (ret < 0)
			error(1, errno, "Failed sendmsg");

		fprintf(stderr, "sendmsg_ret=%d\n", ret);

		if (ret != line_size)
			error(1, errno, "Did not send all bytes %d vs %zd", ret,
			      line_size);

		wait_compl(socket_fd);
	}

	fprintf(stderr, "%s: tx ok\n", TEST_PREFIX);

	free(line);
	close(socket_fd);

	if (ys)
		ynl_sock_destroy(ys);

	return 0;
}

int main(int argc, char *argv[])
{
	struct memory_buffer *mem;
	int is_server = 0, opt;
	int ret;

	while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:z:")) != -1) {
		switch (opt) {
		case 'l':
			is_server = 1;
			break;
		case 's':
			server_ip = optarg;
			break;
		case 'c':
			client_ip = optarg;
			break;
		case 'p':
			port = optarg;
			break;
		case 'v':
			do_validation = atoll(optarg);
			break;
		case 'q':
			num_queues = atoi(optarg);
			break;
		case 't':
			start_queue = atoi(optarg);
			break;
		case 'f':
			ifname = optarg;
			break;
		case 'z':
			max_chunk = atoi(optarg);
			break;
		case '?':
			fprintf(stderr, "unknown option: %c\n", optopt);
			break;
		}
	}

	if (!ifname)
		error(1, 0, "Missing -f argument\n");

	ifindex = if_nametoindex(ifname);

	fprintf(stderr, "using ifindex=%u\n", ifindex);

	if (!server_ip && !client_ip) {
		if (start_queue < 0 && num_queues < 0) {
			num_queues = rxq_num(ifindex);
			if (num_queues < 0)
				error(1, 0, "couldn't detect number of queues\n");
			if (num_queues < 2)
				error(1, 0,
				      "number of device queues is too low\n");
			/* make sure we can bind to multiple queues */
			start_queue = num_queues / 2;
			num_queues /= 2;
		}

		if (start_queue < 0 || num_queues < 0)
			error(1, 0, "Both -t and -q are required\n");

		run_devmem_tests();
		return 0;
	}

	if (start_queue < 0 && num_queues < 0) {
		num_queues = rxq_num(ifindex);
		if (num_queues < 2)
			error(1, 0, "number of device queues is too low\n");

		num_queues = 1;
		start_queue = rxq_num(ifindex) - num_queues;

		if (start_queue < 0)
			error(1, 0, "couldn't detect number of queues\n");

		fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
	}

	for (; optind < argc; optind++)
		fprintf(stderr, "extra arguments: %s\n", argv[optind]);

	if (start_queue < 0)
		error(1, 0, "Missing -t argument\n");

	if (num_queues < 0)
		error(1, 0, "Missing -q argument\n");

	if (!server_ip)
		error(1, 0, "Missing -s argument\n");

	if (!port)
		error(1, 0, "Missing -p argument\n");

	mem = provider->alloc(getpagesize() * NUM_PAGES);
	ret = is_server ? do_server(mem) : do_client(mem);
	provider->free(mem);

	return ret;
}