// SPDX-License-Identifier: GPL-2.0
/*
 * tcpdevmem netcat. Works similarly to netcat but does device memory TCP
 * instead of regular TCP. Uses udmabuf to mock a dmabuf provider.
 *
 * Usage:
 *
 * On server:
 * ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201
 *
 * On client:
 * echo -n "hello\nworld" | nc <server IP> 5201 -p 5201
 *
 * Test data validation:
 *
 * On server:
 * ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201 -v 7
 *
 * On client:
 * yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
 *     tr \\n \\0 | \
 *     head -c 5G | \
 *     nc <server IP> 5201 -p 5201
 *
 * Note this is compatible with regular netcat. i.e. the sender or receiver can
 * be replaced with regular netcat to test the RX or TX path in isolation.
 */
#define _GNU_SOURCE
#define __EXPORTED_HEADERS__

#include <linux/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#define __iovec_defined
#include <fcntl.h>
#include <malloc.h>
#include <error.h>

#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>

#include <linux/memfd.h>
#include <linux/dma-buf.h>
#include <linux/udmabuf.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/netdev.h>
#include <linux/ethtool_netlink.h>
#include <time.h>
#include <net/if.h>

#include "netdev-user.h"
#include "ethtool-user.h"
#include <ynl.h>

#define PAGE_SHIFT 12
#define TEST_PREFIX "ncdevmem"
#define NUM_PAGES 16000

#ifndef MSG_SOCK_DEVMEM
#define MSG_SOCK_DEVMEM 0x2000000
#endif

static char *server_ip;
static char *client_ip;
static char *port;
static size_t do_validation;
static int start_queue = -1;
static int num_queues = -1;
static char *ifname;
static unsigned int ifindex;
static unsigned int dmabuf_id;

struct memory_buffer {
        int fd;
        size_t size;

        int devfd;
        int memfd;
        char *buf_mem;
};

struct memory_provider {
        struct memory_buffer *(*alloc)(size_t size);
        void (*free)(struct memory_buffer *ctx);
        void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
                                   size_t off, int n);
};

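/* Mock dmabuf provider: back a buffer with a sealed memfd of the requested
 * size, turn it into a dma-buf via /dev/udmabuf, and mmap it so the test can
 * read the received payload back from the CPU side.
 */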
static struct memory_buffer *udmabuf_alloc(size_t size)
{
        struct udmabuf_create create;
        struct memory_buffer *ctx;
        int ret;

        ctx = malloc(sizeof(*ctx));
        if (!ctx)
                error(1, ENOMEM, "malloc failed");

        ctx->size = size;

        ctx->devfd = open("/dev/udmabuf", O_RDWR);
        if (ctx->devfd < 0)
                error(1, errno,
                      "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
                      TEST_PREFIX);

        ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
        if (ctx->memfd < 0)
                error(1, errno, "%s: [skip,no-memfd]\n", TEST_PREFIX);

        ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
        if (ret < 0)
                error(1, errno, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);

        ret = ftruncate(ctx->memfd, size);
        if (ret == -1)
                error(1, errno, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);

        memset(&create, 0, sizeof(create));

        create.memfd = ctx->memfd;
        create.offset = 0;
        create.size = size;
        ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
        if (ctx->fd < 0)
                error(1, errno, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);

        ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                            ctx->fd, 0);
        if (ctx->buf_mem == MAP_FAILED)
                error(1, errno, "%s: [FAIL, map udmabuf]\n", TEST_PREFIX);

        return ctx;
}

static void udmabuf_free(struct memory_buffer *ctx)
{
        munmap(ctx->buf_mem, ctx->size);
        close(ctx->fd);
        close(ctx->memfd);
        close(ctx->devfd);
        free(ctx);
}

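/* Copy data out of the dma-buf, bracketing the CPU access with
 * DMA_BUF_IOCTL_SYNC start/end so the read is coherent with device writes.
 */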
static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src,
                                       size_t off, int n)
{
        struct dma_buf_sync sync = {};

        sync.flags = DMA_BUF_SYNC_START;
        ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);

        memcpy(dst, src->buf_mem + off, n);

        sync.flags = DMA_BUF_SYNC_END;
        ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
}

static struct memory_provider udmabuf_memory_provider = {
        .alloc = udmabuf_alloc,
        .free = udmabuf_free,
        .memcpy_from_device = udmabuf_memcpy_from_device,
};

static struct memory_provider *provider = &udmabuf_memory_provider;

static void print_nonzero_bytes(void *ptr, size_t size)
{
        unsigned char *p = ptr;
        unsigned int i;

        for (i = 0; i < size; i++)
                putchar(p[i]);
}

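/* Check that the received bytes follow the repeating 1, 2, ..., (-v value - 1),
 * 0 pattern produced by the sender; bail out after 20 mismatches.
 */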
void validate_buffer(void *line, size_t size)
{
        static unsigned char seed = 1;
        unsigned char *ptr = line;
        int errors = 0;
        size_t i;

        for (i = 0; i < size; i++) {
                if (ptr[i] != seed) {
                        fprintf(stderr,
                                "Failed validation: expected=%u, actual=%u, index=%lu\n",
                                seed, ptr[i], i);
                        errors++;
                        if (errors > 20)
                                error(1, 0, "validation failed.");
                }
                seed++;
                if (seed == do_validation)
                        seed = 0;
        }

        fprintf(stdout, "Validated buffer\n");
}

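/* Query the device's RX queue count (rx + combined channels) over the
 * ethtool netlink family.
 */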
static int rxq_num(int ifindex)
{
        struct ethtool_channels_get_req *req;
        struct ethtool_channels_get_rsp *rsp;
        struct ynl_error yerr;
        struct ynl_sock *ys;
        int num = -1;

        ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
        if (!ys) {
                fprintf(stderr, "YNL: %s\n", yerr.msg);
                return -1;
        }

        req = ethtool_channels_get_req_alloc();
        ethtool_channels_get_req_set_header_dev_index(req, ifindex);
        rsp = ethtool_channels_get(ys, req);
        if (rsp)
                num = rsp->rx_count + rsp->combined_count;
        ethtool_channels_get_req_free(req);
        ethtool_channels_get_rsp_free(rsp);

        ynl_sock_destroy(ys);

        return num;
}

#define run_command(cmd, ...)                                            \
        ({                                                               \
                char command[256];                                       \
                memset(command, 0, sizeof(command));                     \
                snprintf(command, sizeof(command), cmd, ##__VA_ARGS__);  \
                fprintf(stderr, "Running: %s\n", command);               \
                system(command);                                         \
        })

static int reset_flow_steering(void)
{
        /* Depending on the NIC, toggling ntuple off and on might not
         * be allowed. Additionally, attempting to delete existing filters
         * will fail if no filters are present. Therefore, do not enforce
         * the exit status.
         */

        run_command("sudo ethtool -K %s ntuple off >&2", ifname);
        run_command("sudo ethtool -K %s ntuple on >&2", ifname);
        run_command(
                "sudo ethtool -n %s | grep 'Filter:' | awk '{print $2}' | xargs -n1 ethtool -N %s delete >&2",
                ifname, ifname);
        return 0;
}

static const char *tcp_data_split_str(int val)
{
        switch (val) {
        case 0:
                return "off";
        case 1:
                return "auto";
        case 2:
                return "on";
        default:
                return "?";
        }
}

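/* Set tcp-data-split (header split) via ethtool netlink, then read the ring
 * settings back and report the state that actually took effect.
 */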
static int configure_headersplit(bool on)
{
        struct ethtool_rings_get_req *get_req;
        struct ethtool_rings_get_rsp *get_rsp;
        struct ethtool_rings_set_req *req;
        struct ynl_error yerr;
        struct ynl_sock *ys;
        int ret;

        ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
        if (!ys) {
                fprintf(stderr, "YNL: %s\n", yerr.msg);
                return -1;
        }

        req = ethtool_rings_set_req_alloc();
        ethtool_rings_set_req_set_header_dev_index(req, ifindex);
        /* 0 - off, 1 - auto, 2 - on */
        ethtool_rings_set_req_set_tcp_data_split(req, on ? 2 : 0);
        ret = ethtool_rings_set(ys, req);
        if (ret < 0)
                fprintf(stderr, "YNL failed: %s\n", ys->err.msg);
        ethtool_rings_set_req_free(req);

        if (ret == 0) {
                get_req = ethtool_rings_get_req_alloc();
                ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
                get_rsp = ethtool_rings_get(ys, get_req);
                ethtool_rings_get_req_free(get_req);
                if (get_rsp)
                        fprintf(stderr, "TCP header split: %s\n",
                                tcp_data_split_str(get_rsp->tcp_data_split));
                ethtool_rings_get_rsp_free(get_rsp);
        }

        ynl_sock_destroy(ys);

        return ret;
}

static int configure_rss(void)
{
        return run_command("sudo ethtool -X %s equal %d >&2", ifname, start_queue);
}

static int configure_channels(unsigned int rx, unsigned int tx)
{
        return run_command("sudo ethtool -L %s rx %u tx %u", ifname, rx, tx);
}

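/* Install an ntuple rule steering the test flow (dst-ip/dst-port, plus
 * src-ip/src-port when a client IP was given) to start_queue.
 */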
static int configure_flow_steering(struct sockaddr_in6 *server_sin)
{
        const char *type = "tcp6";
        const char *server_addr;
        char buf[40];

        inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
        server_addr = buf;

        if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
                type = "tcp4";
                server_addr = strrchr(server_addr, ':') + 1;
        }

        return run_command("sudo ethtool -N %s flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d >&2",
                           ifname,
                           type,
                           client_ip ? "src-ip" : "",
                           client_ip ?: "",
                           server_addr,
                           client_ip ? "src-port" : "",
                           client_ip ? port : "",
                           port, start_queue);
}

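/* Bind the dma-buf to the given RX queues via the netdev netlink family.
 * The returned YNL socket must stay open for as long as the binding is
 * needed; closing it unbinds the dma-buf.
 */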
static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
                         struct netdev_queue_id *queues,
                         unsigned int n_queue_index, struct ynl_sock **ys)
{
        struct netdev_bind_rx_req *req = NULL;
        struct netdev_bind_rx_rsp *rsp = NULL;
        struct ynl_error yerr;

        *ys = ynl_sock_create(&ynl_netdev_family, &yerr);
        if (!*ys) {
                fprintf(stderr, "YNL: %s\n", yerr.msg);
                return -1;
        }

        req = netdev_bind_rx_req_alloc();
        netdev_bind_rx_req_set_ifindex(req, ifindex);
        netdev_bind_rx_req_set_fd(req, dmabuf_fd);
        __netdev_bind_rx_req_set_queues(req, queues, n_queue_index);

        rsp = netdev_bind_rx(*ys, req);
        if (!rsp) {
                perror("netdev_bind_rx");
                goto err_close;
        }

        if (!rsp->_present.id) {
                perror("id not present");
                goto err_close;
        }

        fprintf(stderr, "got dmabuf id=%d\n", rsp->id);
        dmabuf_id = rsp->id;

        netdev_bind_rx_req_free(req);
        netdev_bind_rx_rsp_free(rsp);

        return 0;

err_close:
        fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
        netdev_bind_rx_req_free(req);
        ynl_sock_destroy(*ys);
        return -1;
}

static void enable_reuseaddr(int fd)
{
        int opt = 1;
        int ret;

        ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
        if (ret)
                error(1, errno, "%s: [FAIL, SO_REUSEPORT]\n", TEST_PREFIX);

        ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
        if (ret)
                error(1, errno, "%s: [FAIL, SO_REUSEADDR]\n", TEST_PREFIX);
}

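/* Parse an IPv4 or IPv6 address string into a sockaddr_in6; IPv4 addresses
 * are stored as IPv4-mapped IPv6 (::ffff:a.b.c.d).
 */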
static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
{
        int ret;

        sin6->sin6_family = AF_INET6;
        sin6->sin6_port = htons(port);

        ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr);
        if (ret != 1) {
                /* fallback to plain IPv4 */
                ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]);
                if (ret != 1)
                        return -1;

                /* add ::ffff prefix */
                sin6->sin6_addr.s6_addr32[0] = 0;
                sin6->sin6_addr.s6_addr32[1] = 0;
                sin6->sin6_addr.s6_addr16[4] = 0;
                sin6->sin6_addr.s6_addr16[5] = 0xffff;
        }

        return 0;
}

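/* Receive side of the test: configure header split, RSS and flow steering,
 * bind the dma-buf to the devmem queues, then accept one connection and
 * consume devmem cmsgs, copying each fragment out of the dma-buf for
 * validation or printing and releasing it with SO_DEVMEM_DONTNEED.
 */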
int do_server(struct memory_buffer *mem)
{
        char ctrl_data[sizeof(int) * 20000];
        struct netdev_queue_id *queues;
        size_t non_page_aligned_frags = 0;
        struct sockaddr_in6 client_addr;
        struct sockaddr_in6 server_sin;
        size_t page_aligned_frags = 0;
        size_t total_received = 0;
        socklen_t client_addr_len;
        bool is_devmem = false;
        char *tmp_mem = NULL;
        struct ynl_sock *ys;
        char iobuf[819200];
        char buffer[256];
        int socket_fd;
        int client_fd;
        size_t i = 0;
        int ret;

        ret = parse_address(server_ip, atoi(port), &server_sin);
        if (ret < 0)
                error(1, 0, "parse server address");

        if (reset_flow_steering())
                error(1, 0, "Failed to reset flow steering\n");

        if (configure_headersplit(1))
                error(1, 0, "Failed to enable TCP header split\n");

        /* Configure RSS to divert all traffic from our devmem queues */
        if (configure_rss())
                error(1, 0, "Failed to configure rss\n");

        /* Flow steer our devmem flows to start_queue */
        if (configure_flow_steering(&server_sin))
                error(1, 0, "Failed to configure flow steering\n");

        sleep(1);

        queues = malloc(sizeof(*queues) * num_queues);
        if (!queues)
                error(1, ENOMEM, "malloc failed");

        for (i = 0; i < num_queues; i++) {
                queues[i]._present.type = 1;
                queues[i]._present.id = 1;
                queues[i].type = NETDEV_QUEUE_TYPE_RX;
                queues[i].id = start_queue + i;
        }

        if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
                error(1, 0, "Failed to bind\n");

        tmp_mem = malloc(mem->size);
        if (!tmp_mem)
                error(1, ENOMEM, "malloc failed");

        socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
        if (socket_fd < 0)
                error(1, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);

        enable_reuseaddr(socket_fd);

        fprintf(stderr, "binding to address %s:%d\n", server_ip,
                ntohs(server_sin.sin6_port));

        ret = bind(socket_fd, &server_sin, sizeof(server_sin));
        if (ret)
                error(1, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);

        ret = listen(socket_fd, 1);
        if (ret)
                error(1, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);

        client_addr_len = sizeof(client_addr);

        inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
                  sizeof(buffer));
        fprintf(stderr, "Waiting for connection on %s:%d\n", buffer,
                ntohs(server_sin.sin6_port));
        client_fd = accept(socket_fd, &client_addr, &client_addr_len);

        inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
                  sizeof(buffer));
        fprintf(stderr, "Got connection from %s:%d\n", buffer,
                ntohs(client_addr.sin6_port));

        while (1) {
                struct iovec iov = { .iov_base = iobuf,
                                     .iov_len = sizeof(iobuf) };
                struct dmabuf_cmsg *dmabuf_cmsg = NULL;
                struct cmsghdr *cm = NULL;
                struct msghdr msg = { 0 };
                struct dmabuf_token token;
                ssize_t ret;

                is_devmem = false;

                msg.msg_iov = &iov;
                msg.msg_iovlen = 1;
                msg.msg_control = ctrl_data;
                msg.msg_controllen = sizeof(ctrl_data);
                ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
                fprintf(stderr, "recvmsg ret=%ld\n", ret);
                if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
                        continue;
                if (ret < 0) {
                        perror("recvmsg");
                        continue;
                }
                if (ret == 0) {
                        fprintf(stderr, "client exited\n");
                        goto cleanup;
                }

                i++;
                for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                        if (cm->cmsg_level != SOL_SOCKET ||
                            (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
                             cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
                                fprintf(stderr, "skipping non-devmem cmsg\n");
                                continue;
                        }

                        dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
                        is_devmem = true;

                        if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
                                /* TODO: process data copied from skb's linear
                                 * buffer.
                                 */
                                fprintf(stderr,
                                        "SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
                                        dmabuf_cmsg->frag_size);

                                continue;
                        }

                        token.token_start = dmabuf_cmsg->frag_token;
                        token.token_count = 1;

                        total_received += dmabuf_cmsg->frag_size;
                        fprintf(stderr,
                                "received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
                                dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
                                dmabuf_cmsg->frag_offset % getpagesize(),
                                dmabuf_cmsg->frag_offset,
                                dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
                                total_received, dmabuf_cmsg->dmabuf_id);

                        if (dmabuf_cmsg->dmabuf_id != dmabuf_id)
                                error(1, 0,
                                      "received on wrong dmabuf_id: flow steering error\n");

                        if (dmabuf_cmsg->frag_size % getpagesize())
                                non_page_aligned_frags++;
                        else
                                page_aligned_frags++;

                        provider->memcpy_from_device(tmp_mem, mem,
                                                     dmabuf_cmsg->frag_offset,
                                                     dmabuf_cmsg->frag_size);

                        if (do_validation)
                                validate_buffer(tmp_mem,
                                                dmabuf_cmsg->frag_size);
                        else
                                print_nonzero_bytes(tmp_mem,
                                                    dmabuf_cmsg->frag_size);

                        ret = setsockopt(client_fd, SOL_SOCKET,
                                         SO_DEVMEM_DONTNEED, &token,
                                         sizeof(token));
                        if (ret != 1)
                                error(1, 0,
                                      "SO_DEVMEM_DONTNEED not enough tokens");
                }
                if (!is_devmem)
                        error(1, 0, "flow steering error\n");

                fprintf(stderr, "total_received=%lu\n", total_received);
        }

        fprintf(stderr, "%s: ok\n", TEST_PREFIX);

        fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
                page_aligned_frags, non_page_aligned_frags);

cleanup:

        free(tmp_mem);
        close(client_fd);
        close(socket_fd);
        ynl_sock_destroy(ys);

        return 0;
}

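/* Exercise the bind_rx_queue() error paths (empty queue list, header split
 * off, deactivating a bound queue) followed by a successful bind and the
 * implicit unbind on socket close.
 */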
void run_devmem_tests(void)
{
        struct netdev_queue_id *queues;
        struct memory_buffer *mem;
        struct ynl_sock *ys;
        size_t i = 0;

        mem = provider->alloc(getpagesize() * NUM_PAGES);

        /* Configure RSS to divert all traffic from our devmem queues */
        if (configure_rss())
                error(1, 0, "rss error\n");

        queues = calloc(num_queues, sizeof(*queues));

        if (configure_headersplit(1))
                error(1, 0, "Failed to configure header split\n");

        if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
                error(1, 0, "Binding empty queues array should have failed\n");

        for (i = 0; i < num_queues; i++) {
                queues[i]._present.type = 1;
                queues[i]._present.id = 1;
                queues[i].type = NETDEV_QUEUE_TYPE_RX;
                queues[i].id = start_queue + i;
        }

        if (configure_headersplit(0))
                error(1, 0, "Failed to configure header split\n");

        if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
                error(1, 0, "Configure dmabuf with header split off should have failed\n");

        if (configure_headersplit(1))
                error(1, 0, "Failed to configure header split\n");

        for (i = 0; i < num_queues; i++) {
                queues[i]._present.type = 1;
                queues[i]._present.id = 1;
                queues[i].type = NETDEV_QUEUE_TYPE_RX;
                queues[i].id = start_queue + i;
        }

        if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
                error(1, 0, "Failed to bind\n");

        /* Deactivating a bound queue should not be legal */
        if (!configure_channels(num_queues, num_queues - 1))
                error(1, 0, "Deactivating a bound queue should be illegal.\n");

        /* Closing the netlink socket does an implicit unbind */
        ynl_sock_destroy(ys);

        provider->free(mem);
}

int main(int argc, char *argv[])
{
        struct memory_buffer *mem;
        int is_server = 0, opt;
        int ret;

        while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:")) != -1) {
                switch (opt) {
                case 'l':
                        is_server = 1;
                        break;
                case 's':
                        server_ip = optarg;
                        break;
                case 'c':
                        client_ip = optarg;
                        break;
                case 'p':
                        port = optarg;
                        break;
                case 'v':
                        do_validation = atoll(optarg);
                        break;
                case 'q':
                        num_queues = atoi(optarg);
                        break;
                case 't':
                        start_queue = atoi(optarg);
                        break;
                case 'f':
                        ifname = optarg;
                        break;
                case '?':
                        fprintf(stderr, "unknown option: %c\n", optopt);
                        break;
                }
        }

        if (!ifname)
                error(1, 0, "Missing -f argument\n");

        ifindex = if_nametoindex(ifname);

        if (!server_ip && !client_ip) {
                if (start_queue < 0 && num_queues < 0) {
                        num_queues = rxq_num(ifindex);
                        if (num_queues < 0)
                                error(1, 0, "couldn't detect number of queues\n");
                        if (num_queues < 2)
                                error(1, 0,
                                      "number of device queues is too low\n");
                        /* make sure can bind to multiple queues */
                        start_queue = num_queues / 2;
                        num_queues /= 2;
                }

                if (start_queue < 0 || num_queues < 0)
                        error(1, 0, "Both -t and -q are required\n");

                run_devmem_tests();
                return 0;
        }

        if (start_queue < 0 && num_queues < 0) {
                num_queues = rxq_num(ifindex);
                if (num_queues < 2)
                        error(1, 0, "number of device queues is too low\n");

                num_queues = 1;
                start_queue = rxq_num(ifindex) - num_queues;

                if (start_queue < 0)
                        error(1, 0, "couldn't detect number of queues\n");

                fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
        }

        for (; optind < argc; optind++)
                fprintf(stderr, "extra arguments: %s\n", argv[optind]);

        if (start_queue < 0)
                error(1, 0, "Missing -t argument\n");

        if (num_queues < 0)
                error(1, 0, "Missing -q argument\n");

        if (!server_ip)
                error(1, 0, "Missing -s argument\n");

        if (!port)
                error(1, 0, "Missing -p argument\n");

        mem = provider->alloc(getpagesize() * NUM_PAGES);
        ret = is_server ? do_server(mem) : 1;
        provider->free(mem);

        return ret;
}