xref: /linux/tools/testing/selftests/drivers/net/hw/ncdevmem.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * tcpdevmem netcat. Works similarly to netcat but does device memory TCP
4  * instead of regular TCP. Uses udmabuf to mock a dmabuf provider.
5  *
6  * Usage:
7  *
8  *     On server:
9  *     ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201
10  *
11  *     On client:
12  *     echo -n "hello\nworld" | \
13  *		ncdevmem -s <server IP> [-c <client IP>] -p 5201 -f eth1
14  *
15  * Note this is compatible with regular netcat. i.e. the sender or receiver can
16  * be replaced with regular netcat to test the RX or TX path in isolation.
17  *
18  * Test data validation (devmem TCP on RX only):
19  *
20  *     On server:
21  *     ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201 -v 7
22  *
23  *     On client:
24  *     yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
25  *             head -c 1G | \
26  *             nc <server IP> 5201 -p 5201
27  *
28  * Test data validation (devmem TCP on RX and TX, validation happens on RX):
29  *
30  *	On server:
31  *	ncdevmem -s <server IP> [-c <client IP>] -l -p 5201 -v 8 -f eth1
32  *
33  *	On client:
34  *	yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06\\x07) | \
35  *		head -c 1M | \
36  *		ncdevmem -s <server IP> [-c <client IP>] -p 5201 -f eth1
37  */
38 #define _GNU_SOURCE
39 #define __EXPORTED_HEADERS__
40 
41 #include <linux/uio.h>
42 #include <stdarg.h>
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <unistd.h>
46 #include <stdbool.h>
47 #include <string.h>
48 #include <errno.h>
49 #define __iovec_defined
50 #include <fcntl.h>
51 #include <limits.h>
52 #include <malloc.h>
53 #include <error.h>
54 #include <poll.h>
55 
56 #include <arpa/inet.h>
57 #include <sys/socket.h>
58 #include <sys/mman.h>
59 #include <sys/ioctl.h>
60 #include <sys/syscall.h>
61 #include <sys/time.h>
62 
63 #include <linux/memfd.h>
64 #include <linux/dma-buf.h>
65 #include <linux/errqueue.h>
66 #include <linux/udmabuf.h>
67 #include <linux/types.h>
68 #include <linux/netlink.h>
69 #include <linux/genetlink.h>
70 #include <linux/netdev.h>
71 #include <linux/ethtool_netlink.h>
72 #include <time.h>
73 #include <net/if.h>
74 
75 #include "netdev-user.h"
76 #include "ethtool-user.h"
77 #include <ynl.h>
78 
79 #define PAGE_SHIFT 12
80 #define TEST_PREFIX "ncdevmem"
81 #define NUM_PAGES 16000
82 
83 #ifndef MSG_SOCK_DEVMEM
84 #define MSG_SOCK_DEVMEM 0x2000000
85 #endif
86 
87 #define MAX_IOV 1024
88 
/* Runtime configuration, filled in from command-line options (option parsing
 * is not visible in this chunk — hedged comments below should be confirmed
 * against the main()/getopt code).
 */
static size_t max_chunk;	/* presumably caps bytes per TX chunk; usage not visible here */
static char *server_ip;		/* server address string */
static char *client_ip;		/* optional client address string */
static char *port;		/* TCP port kept as a string (passed straight into ethtool commands) */
static size_t do_validation;	/* nonzero: validate RX payload against the repeating 1..N-1,'\n' pattern (see validate_buffer()) */
static int start_queue = -1;	/* first RX queue reserved for devmem */
static int num_queues = -1;	/* number of devmem RX queues */
static char *ifname;		/* network interface name */
static unsigned int ifindex;	/* presumably resolved from ifname — resolution not visible here */
static unsigned int dmabuf_id;	/* RX binding id returned by netdev_bind_rx() */
static uint32_t tx_dmabuf_id;	/* TX binding id returned by netdev_bind_tx() */
static int waittime_ms = 500;	/* poll timeout used when waiting for TX completions */

/* System state loaded by current_config_load() */
#define MAX_FLOWS	8
/* ethtool ntuple rule IDs installed by ethtool_add_flow(); -1 = free slot */
static int ntuple_ids[MAX_FLOWS] = { -1, -1, -1, -1, -1, -1, -1, -1, };
105 
/* A device-memory region plus every handle needed to manage and tear it
 * down (see udmabuf_alloc()/udmabuf_free()).
 */
struct memory_buffer {
	int fd;		/* dmabuf fd (from UDMABUF_CREATE) */
	size_t size;	/* region size in bytes */

	int devfd;	/* /dev/udmabuf device fd */
	int memfd;	/* sealed memfd backing the udmabuf */
	char *buf_mem;	/* CPU mmap of the dmabuf */
};
114 
/* Ops vector abstracting the device-memory backend so the test logic does
 * not care how the dmabuf is produced (only udmabuf is implemented here).
 */
struct memory_provider {
	/* allocate a buffer of @size bytes; NULL on failure */
	struct memory_buffer *(*alloc)(size_t size);
	/* release a buffer returned by ->alloc() */
	void (*free)(struct memory_buffer *ctx);
	/* copy @n bytes from CPU memory @src into the buffer at @off */
	void (*memcpy_to_device)(struct memory_buffer *dst, size_t off,
				 void *src, int n);
	/* copy @n bytes from the buffer at @off into CPU memory @dst */
	void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
				   size_t off, int n);
};
123 
124 static void pr_err(const char *fmt, ...)
125 {
126 	va_list args;
127 
128 	fprintf(stderr, "%s: ", TEST_PREFIX);
129 
130 	va_start(args, fmt);
131 	vfprintf(stderr, fmt, args);
132 	va_end(args);
133 
134 	if (errno != 0)
135 		fprintf(stderr, ": %s", strerror(errno));
136 	fprintf(stderr, "\n");
137 }
138 
139 static struct memory_buffer *udmabuf_alloc(size_t size)
140 {
141 	struct udmabuf_create create;
142 	struct memory_buffer *ctx;
143 	int ret;
144 
145 	ctx = malloc(sizeof(*ctx));
146 	if (!ctx)
147 		return NULL;
148 
149 	ctx->size = size;
150 
151 	ctx->devfd = open("/dev/udmabuf", O_RDWR);
152 	if (ctx->devfd < 0) {
153 		pr_err("[skip,no-udmabuf: Unable to access DMA buffer device file]");
154 		goto err_free_ctx;
155 	}
156 
157 	ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
158 	if (ctx->memfd < 0) {
159 		pr_err("[skip,no-memfd]");
160 		goto err_close_dev;
161 	}
162 
163 	ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
164 	if (ret < 0) {
165 		pr_err("[skip,fcntl-add-seals]");
166 		goto err_close_memfd;
167 	}
168 
169 	ret = ftruncate(ctx->memfd, size);
170 	if (ret == -1) {
171 		pr_err("[FAIL,memfd-truncate]");
172 		goto err_close_memfd;
173 	}
174 
175 	memset(&create, 0, sizeof(create));
176 
177 	create.memfd = ctx->memfd;
178 	create.offset = 0;
179 	create.size = size;
180 	ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
181 	if (ctx->fd < 0) {
182 		pr_err("[FAIL, create udmabuf]");
183 		goto err_close_fd;
184 	}
185 
186 	ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
187 			    ctx->fd, 0);
188 	if (ctx->buf_mem == MAP_FAILED) {
189 		pr_err("[FAIL, map udmabuf]");
190 		goto err_close_fd;
191 	}
192 
193 	return ctx;
194 
195 err_close_fd:
196 	close(ctx->fd);
197 err_close_memfd:
198 	close(ctx->memfd);
199 err_close_dev:
200 	close(ctx->devfd);
201 err_free_ctx:
202 	free(ctx);
203 	return NULL;
204 }
205 
206 static void udmabuf_free(struct memory_buffer *ctx)
207 {
208 	munmap(ctx->buf_mem, ctx->size);
209 	close(ctx->fd);
210 	close(ctx->memfd);
211 	close(ctx->devfd);
212 	free(ctx);
213 }
214 
215 static void udmabuf_memcpy_to_device(struct memory_buffer *dst, size_t off,
216 				     void *src, int n)
217 {
218 	struct dma_buf_sync sync = {};
219 
220 	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
221 	ioctl(dst->fd, DMA_BUF_IOCTL_SYNC, &sync);
222 
223 	memcpy(dst->buf_mem + off, src, n);
224 
225 	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
226 	ioctl(dst->fd, DMA_BUF_IOCTL_SYNC, &sync);
227 }
228 
229 static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src,
230 				       size_t off, int n)
231 {
232 	struct dma_buf_sync sync = {};
233 
234 	sync.flags = DMA_BUF_SYNC_START;
235 	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
236 
237 	memcpy(dst, src->buf_mem + off, n);
238 
239 	sync.flags = DMA_BUF_SYNC_END;
240 	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
241 }
242 
/* udmabuf-backed implementation of the memory_provider ops. */
static struct memory_provider udmabuf_memory_provider = {
	.alloc = udmabuf_alloc,
	.free = udmabuf_free,
	.memcpy_to_device = udmabuf_memcpy_to_device,
	.memcpy_from_device = udmabuf_memcpy_from_device,
};

/* Active backend; udmabuf is the only provider implemented in this file. */
static struct memory_provider *provider = &udmabuf_memory_provider;
251 
/* Dump @size bytes from @ptr to stdout. Note: despite the name, every byte
 * is written, zeros included (matches the original putchar() loop).
 */
static void print_nonzero_bytes(void *ptr, size_t size)
{
	/* Fix: the old loop used an unsigned int index against a size_t
	 * bound, which never terminates for size > UINT_MAX; fwrite() also
	 * avoids per-byte putchar() overhead.
	 */
	fwrite(ptr, 1, size, stdout);
}
260 
261 int validate_buffer(void *line, size_t size)
262 {
263 	static unsigned char seed = 1;
264 	unsigned char *ptr = line;
265 	unsigned char expected;
266 	static int errors;
267 	size_t i;
268 
269 	for (i = 0; i < size; i++) {
270 		expected = seed ? seed : '\n';
271 		if (ptr[i] != expected) {
272 			fprintf(stderr,
273 				"Failed validation: expected=%u, actual=%u, index=%lu\n",
274 				expected, ptr[i], i);
275 			errors++;
276 			if (errors > 20) {
277 				pr_err("validation failed");
278 				return -1;
279 			}
280 		}
281 		seed++;
282 		if (seed == do_validation)
283 			seed = 0;
284 	}
285 
286 	fprintf(stdout, "Validated buffer\n");
287 	return 0;
288 }
289 
/* Run a shell command built from the printf-style @cmd/@args via popen().
 *
 * If @out is non-NULL, the first line of the command's stdout (at most
 * @outlen bytes) is copied into it with any trailing newline stripped.
 *
 * Returns the pclose() status of the command, or -1 on popen()/read failure.
 */
static int
__run_command(char *out, size_t outlen, const char *cmd, va_list args)
{
	char command[256];
	FILE *fp;

	vsnprintf(command, sizeof(command), cmd, args);

	fprintf(stderr, "Running: %s\n", command);
	fp = popen(command, "r");
	if (!fp)
		return -1;
	if (out) {
		size_t len;

		if (!fgets(out, outlen, fp)) {
			/* Fix: don't leak the stream (and its child process)
			 * when the command produced no output.
			 */
			pclose(fp);
			return -1;
		}

		/* Remove trailing newline if present */
		len = strlen(out);
		if (len && out[len - 1] == '\n')
			out[len - 1] = '\0';
	}
	return pclose(fp);
}
315 
316 static int run_command(const char *cmd, ...)
317 {
318 	va_list args;
319 	int ret;
320 
321 	va_start(args, cmd);
322 	ret = __run_command(NULL, 0, cmd, args);
323 	va_end(args);
324 
325 	return ret;
326 }
327 
/* Install an ethtool ntuple (flow steering) rule on @ifname.
 *
 * @format/... are appended to "ethtool -N <ifname> " to build the command.
 * The new rule's ID is parsed out of ethtool's "Added rule with ID <n>"
 * output line and recorded in ntuple_ids[] so reset_flow_steering() can
 * delete it later.
 *
 * Returns the (non-negative) rule ID on success, -1 on command failure,
 * unparsable output, or when all MAX_FLOWS tracking slots are occupied.
 */
static int ethtool_add_flow(const char *format, ...)
{
	char local_output[256], cmd[256];
	const char *id_start;
	int flow_idx, ret;
	char *endptr;
	long flow_id;
	va_list args;

	/* claim a free tracking slot before doing any work */
	for (flow_idx = 0; flow_idx < MAX_FLOWS; flow_idx++)
		if (ntuple_ids[flow_idx] == -1)
			break;
	if (flow_idx == MAX_FLOWS) {
		fprintf(stderr, "Error: too many flows\n");
		return -1;
	}

	snprintf(cmd, sizeof(cmd), "ethtool -N %s %s", ifname, format);

	va_start(args, format);
	ret = __run_command(local_output, sizeof(local_output), cmd, args);
	va_end(args);

	if (ret != 0)
		return ret;

	/* Extract the ID from the output */
	id_start = strstr(local_output, "Added rule with ID ");
	if (!id_start)
		return -1;
	id_start += strlen("Added rule with ID ");

	flow_id = strtol(id_start, &endptr, 10);
	if (endptr == id_start || flow_id < 0 || flow_id > INT_MAX)
		return -1;

	fprintf(stderr, "Added flow rule with ID %ld\n", flow_id);
	ntuple_ids[flow_idx] = flow_id;
	return flow_id;
}
368 
369 static int rxq_num(int ifindex)
370 {
371 	struct ethtool_channels_get_req *req;
372 	struct ethtool_channels_get_rsp *rsp;
373 	struct ynl_error yerr;
374 	struct ynl_sock *ys;
375 	int num = -1;
376 
377 	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
378 	if (!ys) {
379 		fprintf(stderr, "YNL: %s\n", yerr.msg);
380 		return -1;
381 	}
382 
383 	req = ethtool_channels_get_req_alloc();
384 	ethtool_channels_get_req_set_header_dev_index(req, ifindex);
385 	rsp = ethtool_channels_get(ys, req);
386 	if (rsp)
387 		num = rsp->rx_count + rsp->combined_count;
388 	ethtool_channels_get_req_free(req);
389 	ethtool_channels_get_rsp_free(rsp);
390 
391 	ynl_sock_destroy(ys);
392 
393 	return num;
394 }
395 
396 static void reset_flow_steering(void)
397 {
398 	int i;
399 
400 	for (i = 0; i < MAX_FLOWS; i++) {
401 		if (ntuple_ids[i] == -1)
402 			continue;
403 		run_command("ethtool -N %s delete %d",
404 			    ifname, ntuple_ids[i]);
405 		ntuple_ids[i] = -1;
406 	}
407 }
408 
/* Map an ETHTOOL_TCP_DATA_SPLIT_* value to a human-readable string
 * ("off"/"auto"/"on"), or "?" for anything out of range.
 */
static const char *tcp_data_split_str(int val)
{
	static const char * const names[] = { "off", "auto", "on" };

	if (val >= 0 && val <= 2)
		return names[val];

	return "?";
}
422 
423 static struct ethtool_rings_get_rsp *get_ring_config(void)
424 {
425 	struct ethtool_rings_get_req *get_req;
426 	struct ethtool_rings_get_rsp *get_rsp;
427 	struct ynl_error yerr;
428 	struct ynl_sock *ys;
429 
430 	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
431 	if (!ys) {
432 		fprintf(stderr, "YNL: %s\n", yerr.msg);
433 		return NULL;
434 	}
435 
436 	get_req = ethtool_rings_get_req_alloc();
437 	ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
438 	get_rsp = ethtool_rings_get(ys, get_req);
439 	ethtool_rings_get_req_free(get_req);
440 
441 	ynl_sock_destroy(ys);
442 
443 	return get_rsp;
444 }
445 
446 static void restore_ring_config(const struct ethtool_rings_get_rsp *config)
447 {
448 	struct ethtool_rings_get_req *get_req;
449 	struct ethtool_rings_get_rsp *get_rsp;
450 	struct ethtool_rings_set_req *req;
451 	struct ynl_error yerr;
452 	struct ynl_sock *ys;
453 	int ret;
454 
455 	if (!config)
456 		return;
457 
458 	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
459 	if (!ys) {
460 		fprintf(stderr, "YNL: %s\n", yerr.msg);
461 		return;
462 	}
463 
464 	req = ethtool_rings_set_req_alloc();
465 	ethtool_rings_set_req_set_header_dev_index(req, ifindex);
466 	ethtool_rings_set_req_set_tcp_data_split(req,
467 						ETHTOOL_TCP_DATA_SPLIT_UNKNOWN);
468 	if (config->_present.hds_thresh)
469 		ethtool_rings_set_req_set_hds_thresh(req, config->hds_thresh);
470 
471 	ret = ethtool_rings_set(ys, req);
472 	if (ret < 0)
473 		fprintf(stderr, "YNL restoring HDS cfg: %s\n", ys->err.msg);
474 
475 	get_req = ethtool_rings_get_req_alloc();
476 	ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
477 	get_rsp = ethtool_rings_get(ys, get_req);
478 	ethtool_rings_get_req_free(get_req);
479 
480 	/* use explicit value if UKNOWN didn't give us the previous */
481 	if (get_rsp->tcp_data_split != config->tcp_data_split) {
482 		ethtool_rings_set_req_set_tcp_data_split(req,
483 							config->tcp_data_split);
484 		ret = ethtool_rings_set(ys, req);
485 		if (ret < 0)
486 			fprintf(stderr, "YNL restoring expl HDS cfg: %s\n",
487 				ys->err.msg);
488 	}
489 
490 	ethtool_rings_get_rsp_free(get_rsp);
491 	ethtool_rings_set_req_free(req);
492 
493 	ynl_sock_destroy(ys);
494 }
495 
/* Enable or disable TCP header split (HDS) on the device.
 *
 * @old: ring-config snapshot, consulted only to know whether the device
 *       exposes hds-thresh.
 * @on:  true  -> force ETHTOOL_TCP_DATA_SPLIT_ENABLED and hds-thresh 0 so
 *                all payload is split into (devmem) data buffers;
 *       false -> ETHTOOL_TCP_DATA_SPLIT_UNKNOWN, i.e. device default.
 *
 * On a successful set, reads the setting back and logs the resulting mode.
 * Returns the YNL set result (0 on success, negative on failure).
 */
static int
configure_headersplit(const struct ethtool_rings_get_rsp *old, bool on)
{
	struct ethtool_rings_get_req *get_req;
	struct ethtool_rings_get_rsp *get_rsp;
	struct ethtool_rings_set_req *req;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = ethtool_rings_set_req_alloc();
	ethtool_rings_set_req_set_header_dev_index(req, ifindex);
	if (on) {
		ethtool_rings_set_req_set_tcp_data_split(req,
						ETHTOOL_TCP_DATA_SPLIT_ENABLED);
		/* threshold 0: split every packet, not just large ones */
		if (old->_present.hds_thresh)
			ethtool_rings_set_req_set_hds_thresh(req, 0);
	} else {
		ethtool_rings_set_req_set_tcp_data_split(req,
						ETHTOOL_TCP_DATA_SPLIT_UNKNOWN);
	}
	ret = ethtool_rings_set(ys, req);
	if (ret < 0)
		fprintf(stderr, "YNL failed: %s\n", ys->err.msg);
	ethtool_rings_set_req_free(req);

	if (ret == 0) {
		/* read back and report what the device actually applied */
		get_req = ethtool_rings_get_req_alloc();
		ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
		get_rsp = ethtool_rings_get(ys, get_req);
		ethtool_rings_get_req_free(get_req);
		if (get_rsp)
			fprintf(stderr, "TCP header split: %s\n",
				tcp_data_split_str(get_rsp->tcp_data_split));
		ethtool_rings_get_rsp_free(get_rsp);
	}

	ynl_sock_destroy(ys);

	return ret;
}
543 
/* Restrict RSS to queues [0, start_queue) so generic traffic stays off the
 * queues reserved for devmem; flow steering then directs our flow there.
 */
static int configure_rss(void)
{
	return run_command("ethtool -X %s equal %d >&2", ifname, start_queue);
}
548 
549 static void reset_rss(void)
550 {
551 	run_command("ethtool -X %s default >&2", ifname, start_queue);
552 }
553 
/* Negative-test helper: try to change the device channel counts to @rx/@tx
 * while a dmabuf binding is active.
 *
 * The requested counts are expressed in terms of what the device supports
 * (combined channels, possibly with a dedicated remainder on one side, or
 * separate rx/tx channels). The set is EXPECTED to fail because a bound
 * queue must not be deactivated; if it unexpectedly succeeds, the previous
 * channel configuration is restored.
 *
 * Returns non-zero when the set failed as expected (or on setup error),
 * 0 when the kernel (wrongly) allowed the change.
 */
static int check_changing_channels(unsigned int rx, unsigned int tx)
{
	struct ethtool_channels_get_req *gchan;
	struct ethtool_channels_set_req *schan;
	struct ethtool_channels_get_rsp *chan;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	fprintf(stderr, "setting channel count rx:%u tx:%u\n", rx, tx);

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	gchan = ethtool_channels_get_req_alloc();
	if (!gchan) {
		ret = -1;
		goto exit_close_sock;
	}

	/* fetch the current configuration (needed for the rollback below) */
	ethtool_channels_get_req_set_header_dev_index(gchan, ifindex);
	chan = ethtool_channels_get(ys, gchan);
	ethtool_channels_get_req_free(gchan);
	if (!chan) {
		fprintf(stderr, "YNL get channels: %s\n", ys->err.msg);
		ret = -1;
		goto exit_close_sock;
	}

	schan =	ethtool_channels_set_req_alloc();
	if (!schan) {
		ret = -1;
		goto exit_free_chan;
	}

	ethtool_channels_set_req_set_header_dev_index(schan, ifindex);

	if (chan->_present.combined_count) {
		/* device uses combined channels; zero any dedicated counts */
		if (chan->_present.rx_count || chan->_present.tx_count) {
			ethtool_channels_set_req_set_rx_count(schan, 0);
			ethtool_channels_set_req_set_tx_count(schan, 0);
		}

		/* express rx/tx as min(rx,tx) combined channels plus a
		 * dedicated remainder on whichever side needs more
		 */
		if (rx == tx) {
			ethtool_channels_set_req_set_combined_count(schan, rx);
		} else if (rx > tx) {
			ethtool_channels_set_req_set_combined_count(schan, tx);
			ethtool_channels_set_req_set_rx_count(schan, rx - tx);
		} else {
			ethtool_channels_set_req_set_combined_count(schan, rx);
			ethtool_channels_set_req_set_tx_count(schan, tx - rx);
		}

	} else if (chan->_present.rx_count) {
		ethtool_channels_set_req_set_rx_count(schan, rx);
		ethtool_channels_set_req_set_tx_count(schan, tx);
	} else {
		fprintf(stderr, "Error: device has neither combined nor rx channels\n");
		ret = -1;
		goto exit_free_schan;
	}

	ret = ethtool_channels_set(ys, schan);
	if (ret) {
		fprintf(stderr, "YNL set channels: %s\n", ys->err.msg);
	} else {
		/* We were expecting a failure, go back to previous settings */
		ethtool_channels_set_req_set_combined_count(schan,
							    chan->combined_count);
		ethtool_channels_set_req_set_rx_count(schan, chan->rx_count);
		ethtool_channels_set_req_set_tx_count(schan, chan->tx_count);

		ret = ethtool_channels_set(ys, schan);
		if (ret)
			fprintf(stderr, "YNL un-setting channels: %s\n",
				ys->err.msg);
	}

exit_free_schan:
	ethtool_channels_set_req_free(schan);
exit_free_chan:
	ethtool_channels_get_rsp_free(chan);
exit_close_sock:
	ynl_sock_destroy(ys);

	return ret;
}
644 
/* Install an ntuple rule steering our TCP flow to start_queue.
 *
 * The server address comes from @server_sin; client address/port come from
 * globals. IPv4-mapped IPv6 server addresses are reduced to their dotted
 * quad and use the "tcp4" flow type. A 5-tuple rule is tried first — when
 * client_ip is unset, the src-ip/src-port matcher %s pairs expand to empty
 * strings — then a 3-tuple (dst-only) rule as fallback.
 *
 * Returns 0 on success, -1 if neither rule could be installed.
 */
static int configure_flow_steering(struct sockaddr_in6 *server_sin)
{
	const char *type = "tcp6";
	const char *server_addr;
	char buf[40];
	int flow_id;

	inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
	server_addr = buf;

	if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
		type = "tcp4";
		/* "::ffff:a.b.c.d" -> "a.b.c.d" */
		server_addr = strrchr(server_addr, ':') + 1;
	}

	/* Try configure 5-tuple */
	flow_id = ethtool_add_flow("flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d",
				   type,
				   client_ip ? "src-ip" : "",
				   client_ip ?: "",
				   server_addr,
				   client_ip ? "src-port" : "",
				   client_ip ? port : "",
				   port, start_queue);
	if (flow_id < 0) {
		/* If that fails, try configure 3-tuple */
		flow_id = ethtool_add_flow("flow-type %s dst-ip %s dst-port %s queue %d",
					   type, server_addr, port, start_queue);
		if (flow_id < 0)
			/* If that fails, return error */
			return -1;
	}

	return 0;
}
680 
681 static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
682 			 struct netdev_queue_id *queues,
683 			 unsigned int n_queue_index, struct ynl_sock **ys)
684 {
685 	struct netdev_bind_rx_req *req = NULL;
686 	struct netdev_bind_rx_rsp *rsp = NULL;
687 	struct ynl_error yerr;
688 
689 	*ys = ynl_sock_create(&ynl_netdev_family, &yerr);
690 	if (!*ys) {
691 		netdev_queue_id_free(queues);
692 		fprintf(stderr, "YNL: %s\n", yerr.msg);
693 		return -1;
694 	}
695 
696 	req = netdev_bind_rx_req_alloc();
697 	netdev_bind_rx_req_set_ifindex(req, ifindex);
698 	netdev_bind_rx_req_set_fd(req, dmabuf_fd);
699 	__netdev_bind_rx_req_set_queues(req, queues, n_queue_index);
700 
701 	rsp = netdev_bind_rx(*ys, req);
702 	if (!rsp) {
703 		perror("netdev_bind_rx");
704 		goto err_close;
705 	}
706 
707 	if (!rsp->_present.id) {
708 		perror("id not present");
709 		goto err_close;
710 	}
711 
712 	fprintf(stderr, "got dmabuf id=%d\n", rsp->id);
713 	dmabuf_id = rsp->id;
714 
715 	netdev_bind_rx_req_free(req);
716 	netdev_bind_rx_rsp_free(rsp);
717 
718 	return 0;
719 
720 err_close:
721 	fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
722 	netdev_bind_rx_req_free(req);
723 	ynl_sock_destroy(*ys);
724 	return -1;
725 }
726 
727 static int bind_tx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
728 			 struct ynl_sock **ys)
729 {
730 	struct netdev_bind_tx_req *req = NULL;
731 	struct netdev_bind_tx_rsp *rsp = NULL;
732 	struct ynl_error yerr;
733 
734 	*ys = ynl_sock_create(&ynl_netdev_family, &yerr);
735 	if (!*ys) {
736 		fprintf(stderr, "YNL: %s\n", yerr.msg);
737 		return -1;
738 	}
739 
740 	req = netdev_bind_tx_req_alloc();
741 	netdev_bind_tx_req_set_ifindex(req, ifindex);
742 	netdev_bind_tx_req_set_fd(req, dmabuf_fd);
743 
744 	rsp = netdev_bind_tx(*ys, req);
745 	if (!rsp) {
746 		perror("netdev_bind_tx");
747 		goto err_close;
748 	}
749 
750 	if (!rsp->_present.id) {
751 		perror("id not present");
752 		goto err_close;
753 	}
754 
755 	fprintf(stderr, "got tx dmabuf id=%d\n", rsp->id);
756 	tx_dmabuf_id = rsp->id;
757 
758 	netdev_bind_tx_req_free(req);
759 	netdev_bind_tx_rsp_free(rsp);
760 
761 	return 0;
762 
763 err_close:
764 	fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
765 	netdev_bind_tx_req_free(req);
766 	ynl_sock_destroy(*ys);
767 	return -1;
768 }
769 
770 static int enable_reuseaddr(int fd)
771 {
772 	int opt = 1;
773 	int ret;
774 
775 	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
776 	if (ret) {
777 		pr_err("SO_REUSEPORT failed");
778 		return -1;
779 	}
780 
781 	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
782 	if (ret) {
783 		pr_err("SO_REUSEADDR failed");
784 		return -1;
785 	}
786 
787 	return 0;
788 }
789 
/* Parse @str as an IPv6 or IPv4 address into an AF_INET6 sockaddr.
 *
 * Plain IPv4 addresses are stored as IPv4-mapped IPv6 ("::ffff:a.b.c.d").
 * @port is stored in network byte order. Returns 0 on success, -1 if @str
 * parses as neither family (family/port fields are still set in that case).
 */
static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
{
	sin6->sin6_family = AF_INET6;
	sin6->sin6_port = htons(port);

	if (inet_pton(AF_INET6, str, &sin6->sin6_addr) == 1)
		return 0;

	/* fallback to plain IPv4 */
	if (inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]) != 1)
		return -1;

	/* add ::ffff prefix */
	sin6->sin6_addr.s6_addr32[0] = 0;
	sin6->sin6_addr.s6_addr32[1] = 0;
	sin6->sin6_addr.s6_addr16[4] = 0;
	sin6->sin6_addr.s6_addr16[5] = 0xffff;

	return 0;
}
813 
814 static struct netdev_queue_id *create_queues(void)
815 {
816 	struct netdev_queue_id *queues;
817 	size_t i = 0;
818 
819 	queues = netdev_queue_id_alloc(num_queues);
820 	for (i = 0; i < num_queues; i++) {
821 		netdev_queue_id_set_type(&queues[i], NETDEV_QUEUE_TYPE_RX);
822 		netdev_queue_id_set_id(&queues[i], start_queue + i);
823 	}
824 
825 	return queues;
826 }
827 
828 static int do_server(struct memory_buffer *mem)
829 {
830 	struct ethtool_rings_get_rsp *ring_config;
831 	char ctrl_data[sizeof(int) * 20000];
832 	size_t non_page_aligned_frags = 0;
833 	struct sockaddr_in6 client_addr;
834 	struct sockaddr_in6 server_sin;
835 	size_t page_aligned_frags = 0;
836 	size_t total_received = 0;
837 	socklen_t client_addr_len;
838 	bool is_devmem = false;
839 	char *tmp_mem = NULL;
840 	struct ynl_sock *ys;
841 	char iobuf[819200];
842 	int ret, err = -1;
843 	char buffer[256];
844 	int socket_fd;
845 	int client_fd;
846 
847 	ret = parse_address(server_ip, atoi(port), &server_sin);
848 	if (ret < 0) {
849 		pr_err("parse server address");
850 		return -1;
851 	}
852 
853 	ring_config = get_ring_config();
854 	if (!ring_config) {
855 		pr_err("Failed to get current ring configuration");
856 		return -1;
857 	}
858 
859 	if (configure_headersplit(ring_config, 1)) {
860 		pr_err("Failed to enable TCP header split");
861 		goto err_free_ring_config;
862 	}
863 
864 	/* Configure RSS to divert all traffic from our devmem queues */
865 	if (configure_rss()) {
866 		pr_err("Failed to configure rss");
867 		goto err_reset_headersplit;
868 	}
869 
870 	/* Flow steer our devmem flows to start_queue */
871 	if (configure_flow_steering(&server_sin)) {
872 		pr_err("Failed to configure flow steering");
873 		goto err_reset_rss;
874 	}
875 
876 	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys)) {
877 		pr_err("Failed to bind");
878 		goto err_reset_flow_steering;
879 	}
880 
881 	tmp_mem = malloc(mem->size);
882 	if (!tmp_mem)
883 		goto err_unbind;
884 
885 	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
886 	if (socket_fd < 0) {
887 		pr_err("Failed to create socket");
888 		goto err_free_tmp;
889 	}
890 
891 	if (enable_reuseaddr(socket_fd))
892 		goto err_close_socket;
893 
894 	fprintf(stderr, "binding to address %s:%d\n", server_ip,
895 		ntohs(server_sin.sin6_port));
896 
897 	ret = bind(socket_fd, &server_sin, sizeof(server_sin));
898 	if (ret) {
899 		pr_err("Failed to bind");
900 		goto err_close_socket;
901 	}
902 
903 	ret = listen(socket_fd, 1);
904 	if (ret) {
905 		pr_err("Failed to listen");
906 		goto err_close_socket;
907 	}
908 
909 	client_addr_len = sizeof(client_addr);
910 
911 	inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
912 		  sizeof(buffer));
913 	fprintf(stderr, "Waiting or connection on %s:%d\n", buffer,
914 		ntohs(server_sin.sin6_port));
915 	client_fd = accept(socket_fd, &client_addr, &client_addr_len);
916 	if (client_fd < 0) {
917 		pr_err("Failed to accept");
918 		goto err_close_socket;
919 	}
920 
921 	inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
922 		  sizeof(buffer));
923 	fprintf(stderr, "Got connection from %s:%d\n", buffer,
924 		ntohs(client_addr.sin6_port));
925 
926 	while (1) {
927 		struct iovec iov = { .iov_base = iobuf,
928 				     .iov_len = sizeof(iobuf) };
929 		struct dmabuf_cmsg *dmabuf_cmsg = NULL;
930 		struct cmsghdr *cm = NULL;
931 		struct msghdr msg = { 0 };
932 		struct dmabuf_token token;
933 		ssize_t ret;
934 
935 		is_devmem = false;
936 
937 		msg.msg_iov = &iov;
938 		msg.msg_iovlen = 1;
939 		msg.msg_control = ctrl_data;
940 		msg.msg_controllen = sizeof(ctrl_data);
941 		ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
942 		fprintf(stderr, "recvmsg ret=%ld\n", ret);
943 		if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
944 			continue;
945 		if (ret < 0) {
946 			perror("recvmsg");
947 			if (errno == EFAULT) {
948 				pr_err("received EFAULT, won't recover");
949 				goto err_close_client;
950 			}
951 			continue;
952 		}
953 		if (ret == 0) {
954 			errno = 0;
955 			pr_err("client exited");
956 			goto cleanup;
957 		}
958 
959 		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
960 			if (cm->cmsg_level != SOL_SOCKET ||
961 			    (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
962 			     cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
963 				fprintf(stderr, "skipping non-devmem cmsg\n");
964 				continue;
965 			}
966 
967 			dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
968 			is_devmem = true;
969 
970 			if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
971 				/* TODO: process data copied from skb's linear
972 				 * buffer.
973 				 */
974 				fprintf(stderr,
975 					"SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
976 					dmabuf_cmsg->frag_size);
977 
978 				continue;
979 			}
980 
981 			token.token_start = dmabuf_cmsg->frag_token;
982 			token.token_count = 1;
983 
984 			total_received += dmabuf_cmsg->frag_size;
985 			fprintf(stderr,
986 				"received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
987 				dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
988 				dmabuf_cmsg->frag_offset % getpagesize(),
989 				dmabuf_cmsg->frag_offset,
990 				dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
991 				total_received, dmabuf_cmsg->dmabuf_id);
992 
993 			if (dmabuf_cmsg->dmabuf_id != dmabuf_id) {
994 				pr_err("received on wrong dmabuf_id: flow steering error");
995 				goto err_close_client;
996 			}
997 
998 			if (dmabuf_cmsg->frag_size % getpagesize())
999 				non_page_aligned_frags++;
1000 			else
1001 				page_aligned_frags++;
1002 
1003 			provider->memcpy_from_device(tmp_mem, mem,
1004 						     dmabuf_cmsg->frag_offset,
1005 						     dmabuf_cmsg->frag_size);
1006 
1007 			if (do_validation) {
1008 				if (validate_buffer(tmp_mem,
1009 						    dmabuf_cmsg->frag_size))
1010 					goto err_close_client;
1011 			} else {
1012 				print_nonzero_bytes(tmp_mem,
1013 						    dmabuf_cmsg->frag_size);
1014 			}
1015 
1016 			ret = setsockopt(client_fd, SOL_SOCKET,
1017 					 SO_DEVMEM_DONTNEED, &token,
1018 					 sizeof(token));
1019 			if (ret != 1) {
1020 				pr_err("SO_DEVMEM_DONTNEED not enough tokens");
1021 				goto err_close_client;
1022 			}
1023 		}
1024 		if (!is_devmem) {
1025 			pr_err("flow steering error");
1026 			goto err_close_client;
1027 		}
1028 
1029 		fprintf(stderr, "total_received=%lu\n", total_received);
1030 	}
1031 
1032 	fprintf(stderr, "%s: ok\n", TEST_PREFIX);
1033 
1034 	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
1035 		page_aligned_frags, non_page_aligned_frags);
1036 
1037 cleanup:
1038 	err = 0;
1039 
1040 err_close_client:
1041 	close(client_fd);
1042 err_close_socket:
1043 	close(socket_fd);
1044 err_free_tmp:
1045 	free(tmp_mem);
1046 err_unbind:
1047 	ynl_sock_destroy(ys);
1048 err_reset_flow_steering:
1049 	reset_flow_steering();
1050 err_reset_rss:
1051 	reset_rss();
1052 err_reset_headersplit:
1053 	restore_ring_config(ring_config);
1054 err_free_ring_config:
1055 	ethtool_rings_get_rsp_free(ring_config);
1056 	return err;
1057 }
1058 
/* Devmem RX binding API tests.
 *
 * Allocates a dmabuf and exercises the netdev bind-rx interface:
 *   1. binding with an EMPTY queue array must fail;
 *   2. binding with header split DISABLED must fail;
 *   3. binding with header split enabled must succeed;
 *   4. shrinking channel counts while a queue is bound must fail.
 * Note the inverted checks: !bind_rx_queue(...) means the bind SUCCEEDED,
 * which for the negative tests is the error case.
 *
 * Returns 0 when all checks pass, -1 otherwise. Device configuration is
 * restored through the error labels.
 */
int run_devmem_tests(void)
{
	struct ethtool_rings_get_rsp *ring_config;
	struct netdev_queue_id *queues;
	struct memory_buffer *mem;
	struct ynl_sock *ys;
	int err = -1;

	mem = provider->alloc(getpagesize() * NUM_PAGES);
	if (!mem) {
		pr_err("Failed to allocate memory buffer");
		return -1;
	}

	/* snapshot ring config so we can restore it on exit */
	ring_config = get_ring_config();
	if (!ring_config) {
		pr_err("Failed to get current ring configuration");
		goto err_free_mem;
	}

	/* Configure RSS to divert all traffic from our devmem queues */
	if (configure_rss()) {
		pr_err("rss error");
		goto err_free_ring_config;
	}

	if (configure_headersplit(ring_config, 1)) {
		pr_err("Failed to configure header split");
		goto err_reset_rss;
	}

	/* negative test 1: empty queue list must be rejected */
	queues = netdev_queue_id_alloc(num_queues);
	if (!queues) {
		pr_err("Failed to allocate empty queues array");
		goto err_reset_headersplit;
	}

	if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
		pr_err("Binding empty queues array should have failed");
		goto err_unbind;
	}

	/* negative test 2: binding must be rejected without header split */
	if (configure_headersplit(ring_config, 0)) {
		pr_err("Failed to configure header split");
		goto err_reset_headersplit;
	}

	queues = create_queues();
	if (!queues) {
		pr_err("Failed to create queues");
		goto err_reset_headersplit;
	}

	if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
		pr_err("Configure dmabuf with header split off should have failed");
		goto err_unbind;
	}

	/* positive test: with header split on, binding must succeed */
	if (configure_headersplit(ring_config, 1)) {
		pr_err("Failed to configure header split");
		goto err_reset_headersplit;
	}

	queues = create_queues();
	if (!queues) {
		pr_err("Failed to create queues");
		goto err_reset_headersplit;
	}

	if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
		pr_err("Failed to bind");
		goto err_reset_headersplit;
	}

	/* Deactivating a bound queue should not be legal */
	if (!check_changing_channels(num_queues, num_queues)) {
		pr_err("Deactivating a bound queue should be illegal");
		goto err_unbind;
	}

	err = 0;
	goto err_unbind;

err_unbind:
	ynl_sock_destroy(ys);
err_reset_headersplit:
	restore_ring_config(ring_config);
err_reset_rss:
	reset_rss();
err_free_ring_config:
	ethtool_rings_get_rsp_free(ring_config);
err_free_mem:
	provider->free(mem);
	return err;
}
1154 
1155 static uint64_t gettimeofday_ms(void)
1156 {
1157 	struct timeval tv;
1158 
1159 	gettimeofday(&tv, NULL);
1160 	return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000ULL);
1161 }
1162 
1163 static int do_poll(int fd)
1164 {
1165 	struct pollfd pfd;
1166 	int ret;
1167 
1168 	pfd.revents = 0;
1169 	pfd.fd = fd;
1170 
1171 	ret = poll(&pfd, 1, waittime_ms);
1172 	if (ret == -1) {
1173 		pr_err("poll");
1174 		return -1;
1175 	}
1176 
1177 	return ret && (pfd.revents & POLLERR);
1178 }
1179 
1180 static int wait_compl(int fd)
1181 {
1182 	int64_t tstop = gettimeofday_ms() + waittime_ms;
1183 	char control[CMSG_SPACE(100)] = {};
1184 	struct sock_extended_err *serr;
1185 	struct msghdr msg = {};
1186 	struct cmsghdr *cm;
1187 	__u32 hi, lo;
1188 	int ret;
1189 
1190 	msg.msg_control = control;
1191 	msg.msg_controllen = sizeof(control);
1192 
1193 	while (gettimeofday_ms() < tstop) {
1194 		ret = do_poll(fd);
1195 		if (ret < 0)
1196 			return ret;
1197 		if (!ret)
1198 			continue;
1199 
1200 		ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
1201 		if (ret < 0) {
1202 			if (errno == EAGAIN)
1203 				continue;
1204 			pr_err("recvmsg(MSG_ERRQUEUE)");
1205 			return -1;
1206 		}
1207 		if (msg.msg_flags & MSG_CTRUNC) {
1208 			pr_err("MSG_CTRUNC");
1209 			return -1;
1210 		}
1211 
1212 		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
1213 			if (cm->cmsg_level != SOL_IP &&
1214 			    cm->cmsg_level != SOL_IPV6)
1215 				continue;
1216 			if (cm->cmsg_level == SOL_IP &&
1217 			    cm->cmsg_type != IP_RECVERR)
1218 				continue;
1219 			if (cm->cmsg_level == SOL_IPV6 &&
1220 			    cm->cmsg_type != IPV6_RECVERR)
1221 				continue;
1222 
1223 			serr = (void *)CMSG_DATA(cm);
1224 			if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) {
1225 				pr_err("wrong origin %u", serr->ee_origin);
1226 				return -1;
1227 			}
1228 			if (serr->ee_errno != 0) {
1229 				pr_err("wrong errno %d", serr->ee_errno);
1230 				return -1;
1231 			}
1232 
1233 			hi = serr->ee_data;
1234 			lo = serr->ee_info;
1235 
1236 			fprintf(stderr, "tx complete [%d,%d]\n", lo, hi);
1237 			return 0;
1238 		}
1239 	}
1240 
1241 	pr_err("did not receive tx completion");
1242 	return -1;
1243 }
1244 
1245 static int do_client(struct memory_buffer *mem)
1246 {
1247 	char ctrl_data[CMSG_SPACE(sizeof(__u32))];
1248 	struct sockaddr_in6 server_sin;
1249 	struct sockaddr_in6 client_sin;
1250 	struct ynl_sock *ys = NULL;
1251 	struct iovec iov[MAX_IOV];
1252 	struct msghdr msg = {};
1253 	ssize_t line_size = 0;
1254 	struct cmsghdr *cmsg;
1255 	char *line = NULL;
1256 	int ret, err = -1;
1257 	size_t len = 0;
1258 	int socket_fd;
1259 	__u32 ddmabuf;
1260 	int opt = 1;
1261 
1262 	ret = parse_address(server_ip, atoi(port), &server_sin);
1263 	if (ret < 0) {
1264 		pr_err("parse server address");
1265 		return -1;
1266 	}
1267 
1268 	if (client_ip) {
1269 		ret = parse_address(client_ip, atoi(port), &client_sin);
1270 		if (ret < 0) {
1271 			pr_err("parse client address");
1272 			return ret;
1273 		}
1274 	}
1275 
1276 	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
1277 	if (socket_fd < 0) {
1278 		pr_err("create socket");
1279 		return -1;
1280 	}
1281 
1282 	if (enable_reuseaddr(socket_fd))
1283 		goto err_close_socket;
1284 
1285 	ret = setsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
1286 			 strlen(ifname) + 1);
1287 	if (ret) {
1288 		pr_err("bindtodevice");
1289 		goto err_close_socket;
1290 	}
1291 
1292 	if (bind_tx_queue(ifindex, mem->fd, &ys)) {
1293 		pr_err("Failed to bind");
1294 		goto err_close_socket;
1295 	}
1296 
1297 	if (client_ip) {
1298 		ret = bind(socket_fd, &client_sin, sizeof(client_sin));
1299 		if (ret) {
1300 			pr_err("bind");
1301 			goto err_unbind;
1302 		}
1303 	}
1304 
1305 	ret = setsockopt(socket_fd, SOL_SOCKET, SO_ZEROCOPY, &opt, sizeof(opt));
1306 	if (ret) {
1307 		pr_err("set sock opt");
1308 		goto err_unbind;
1309 	}
1310 
1311 	fprintf(stderr, "Connect to %s %d (via %s)\n", server_ip,
1312 		ntohs(server_sin.sin6_port), ifname);
1313 
1314 	ret = connect(socket_fd, &server_sin, sizeof(server_sin));
1315 	if (ret) {
1316 		pr_err("connect");
1317 		goto err_unbind;
1318 	}
1319 
1320 	while (1) {
1321 		free(line);
1322 		line = NULL;
1323 		line_size = getline(&line, &len, stdin);
1324 
1325 		if (line_size < 0)
1326 			break;
1327 
1328 		if (max_chunk) {
1329 			msg.msg_iovlen =
1330 				(line_size + max_chunk - 1) / max_chunk;
1331 			if (msg.msg_iovlen > MAX_IOV) {
1332 				pr_err("can't partition %zd bytes into maximum of %d chunks",
1333 				       line_size, MAX_IOV);
1334 				goto err_free_line;
1335 			}
1336 
1337 			for (int i = 0; i < msg.msg_iovlen; i++) {
1338 				iov[i].iov_base = (void *)(i * max_chunk);
1339 				iov[i].iov_len = max_chunk;
1340 			}
1341 
1342 			iov[msg.msg_iovlen - 1].iov_len =
1343 				line_size - (msg.msg_iovlen - 1) * max_chunk;
1344 		} else {
1345 			iov[0].iov_base = 0;
1346 			iov[0].iov_len = line_size;
1347 			msg.msg_iovlen = 1;
1348 		}
1349 
1350 		msg.msg_iov = iov;
1351 		provider->memcpy_to_device(mem, 0, line, line_size);
1352 
1353 		msg.msg_control = ctrl_data;
1354 		msg.msg_controllen = sizeof(ctrl_data);
1355 
1356 		cmsg = CMSG_FIRSTHDR(&msg);
1357 		cmsg->cmsg_level = SOL_SOCKET;
1358 		cmsg->cmsg_type = SCM_DEVMEM_DMABUF;
1359 		cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
1360 
1361 		ddmabuf = tx_dmabuf_id;
1362 
1363 		*((__u32 *)CMSG_DATA(cmsg)) = ddmabuf;
1364 
1365 		ret = sendmsg(socket_fd, &msg, MSG_ZEROCOPY);
1366 		if (ret < 0) {
1367 			pr_err("Failed sendmsg");
1368 			goto err_free_line;
1369 		}
1370 
1371 		fprintf(stderr, "sendmsg_ret=%d\n", ret);
1372 
1373 		if (ret != line_size) {
1374 			pr_err("Did not send all bytes %d vs %zd", ret, line_size);
1375 			goto err_free_line;
1376 		}
1377 
1378 		if (wait_compl(socket_fd))
1379 			goto err_free_line;
1380 	}
1381 
1382 	fprintf(stderr, "%s: tx ok\n", TEST_PREFIX);
1383 
1384 	err = 0;
1385 
1386 err_free_line:
1387 	free(line);
1388 err_unbind:
1389 	ynl_sock_destroy(ys);
1390 err_close_socket:
1391 	close(socket_fd);
1392 	return err;
1393 }
1394 
1395 int main(int argc, char *argv[])
1396 {
1397 	struct memory_buffer *mem;
1398 	int is_server = 0, opt;
1399 	int ret, err = 1;
1400 
1401 	while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:z:")) != -1) {
1402 		switch (opt) {
1403 		case 'l':
1404 			is_server = 1;
1405 			break;
1406 		case 's':
1407 			server_ip = optarg;
1408 			break;
1409 		case 'c':
1410 			client_ip = optarg;
1411 			break;
1412 		case 'p':
1413 			port = optarg;
1414 			break;
1415 		case 'v':
1416 			do_validation = atoll(optarg);
1417 			break;
1418 		case 'q':
1419 			num_queues = atoi(optarg);
1420 			break;
1421 		case 't':
1422 			start_queue = atoi(optarg);
1423 			break;
1424 		case 'f':
1425 			ifname = optarg;
1426 			break;
1427 		case 'z':
1428 			max_chunk = atoi(optarg);
1429 			break;
1430 		case '?':
1431 			fprintf(stderr, "unknown option: %c\n", optopt);
1432 			break;
1433 		}
1434 	}
1435 
1436 	if (!ifname) {
1437 		pr_err("Missing -f argument");
1438 		return 1;
1439 	}
1440 
1441 	ifindex = if_nametoindex(ifname);
1442 
1443 	fprintf(stderr, "using ifindex=%u\n", ifindex);
1444 
1445 	if (!server_ip && !client_ip) {
1446 		if (start_queue < 0 && num_queues < 0) {
1447 			num_queues = rxq_num(ifindex);
1448 			if (num_queues < 0) {
1449 				pr_err("couldn't detect number of queues");
1450 				return 1;
1451 			}
1452 			if (num_queues < 2) {
1453 				pr_err("number of device queues is too low");
1454 				return 1;
1455 			}
1456 			/* make sure can bind to multiple queues */
1457 			start_queue = num_queues / 2;
1458 			num_queues /= 2;
1459 		}
1460 
1461 		if (start_queue < 0 || num_queues < 0) {
1462 			pr_err("Both -t and -q are required");
1463 			return 1;
1464 		}
1465 
1466 		return run_devmem_tests();
1467 	}
1468 
1469 	if (start_queue < 0 && num_queues < 0) {
1470 		num_queues = rxq_num(ifindex);
1471 		if (num_queues < 2) {
1472 			pr_err("number of device queues is too low");
1473 			return 1;
1474 		}
1475 
1476 		num_queues = 1;
1477 		start_queue = rxq_num(ifindex) - num_queues;
1478 
1479 		if (start_queue < 0) {
1480 			pr_err("couldn't detect number of queues");
1481 			return 1;
1482 		}
1483 
1484 		fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
1485 	}
1486 
1487 	for (; optind < argc; optind++)
1488 		fprintf(stderr, "extra arguments: %s\n", argv[optind]);
1489 
1490 	if (start_queue < 0) {
1491 		pr_err("Missing -t argument");
1492 		return 1;
1493 	}
1494 
1495 	if (num_queues < 0) {
1496 		pr_err("Missing -q argument");
1497 		return 1;
1498 	}
1499 
1500 	if (!server_ip) {
1501 		pr_err("Missing -s argument");
1502 		return 1;
1503 	}
1504 
1505 	if (!port) {
1506 		pr_err("Missing -p argument");
1507 		return 1;
1508 	}
1509 
1510 	mem = provider->alloc(getpagesize() * NUM_PAGES);
1511 	if (!mem) {
1512 		pr_err("Failed to allocate memory buffer");
1513 		return 1;
1514 	}
1515 
1516 	ret = is_server ? do_server(mem) : do_client(mem);
1517 	if (ret)
1518 		goto err_free_mem;
1519 
1520 	err = 0;
1521 
1522 err_free_mem:
1523 	provider->free(mem);
1524 	return err;
1525 }
1526