xref: /linux/tools/testing/selftests/drivers/net/hw/ncdevmem.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * tcpdevmem netcat. Works similarly to netcat but does device memory TCP
4  * instead of regular TCP. Uses udmabuf to mock a dmabuf provider.
5  *
6  * Usage:
7  *
8  *     On server:
9  *     ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201
10  *
11  *     On client:
12  *     echo -n "hello\nworld" | \
13  *		ncdevmem -s <server IP> [-c <client IP>] -p 5201 -f eth1
14  *
15  * Note this is compatible with regular netcat. i.e. the sender or receiver can
16  * be replaced with regular netcat to test the RX or TX path in isolation.
17  *
18  * Test data validation (devmem TCP on RX only):
19  *
20  *     On server:
21  *     ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201 -v 7
22  *
23  *     On client:
24  *     yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
25  *             head -c 1G | \
26  *             nc <server IP> 5201 -p 5201
27  *
28  * Test data validation (devmem TCP on RX and TX, validation happens on RX):
29  *
30  *	On server:
31  *	ncdevmem -s <server IP> [-c <client IP>] -l -p 5201 -v 8 -f eth1
32  *
33  *	On client:
34  *	yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06\\x07) | \
35  *		head -c 1M | \
36  *		ncdevmem -s <server IP> [-c <client IP>] -p 5201 -f eth1
37  */
38 #define _GNU_SOURCE
39 #define __EXPORTED_HEADERS__
40 
#include <linux/uio.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#define __iovec_defined
#include <fcntl.h>
#include <malloc.h>
#include <error.h>
#include <poll.h>

#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/time.h>

#include <linux/memfd.h>
#include <linux/dma-buf.h>
#include <linux/errqueue.h>
#include <linux/udmabuf.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/netdev.h>
#include <linux/ethtool_netlink.h>
#include <time.h>
#include <net/if.h>

#include "netdev-user.h"
#include "ethtool-user.h"
#include <ynl.h>
77 
/* Shift used only to derive a "page number" for log output below. */
#define PAGE_SHIFT 12
#define TEST_PREFIX "ncdevmem"
/* Size (in pages) of the dmabuf allocated for the tests. */
#define NUM_PAGES 16000

#ifndef MSG_SOCK_DEVMEM
#define MSG_SOCK_DEVMEM 0x2000000
#endif

#define MAX_IOV 1024

/*
 * Runtime configuration. These appear to be set from command-line options
 * (option parsing is not visible in this chunk) — TODO confirm against the
 * rest of the file.
 */
static size_t max_chunk;
static char *server_ip;
static char *client_ip;
static char *port;
/* Non-zero enables RX data validation; also the modulus for the seed. */
static size_t do_validation;
static int start_queue = -1;
static int num_queues = -1;
static char *ifname;
static unsigned int ifindex;
/* dmabuf id returned by the most recent RX bind (see bind_rx_queue()). */
static unsigned int dmabuf_id;
/* dmabuf id returned by the most recent TX bind (see bind_tx_queue()). */
static uint32_t tx_dmabuf_id;
/* poll()/completion-wait timeout in milliseconds. */
static int waittime_ms = 500;

/* System state loaded by current_config_load() */
#define MAX_FLOWS	8
/* ethtool ntuple rule ids installed by ethtool_add_flow(); -1 = free slot. */
static int ntuple_ids[MAX_FLOWS] = { -1, -1, -1, -1, -1, -1, -1, -1, };
104 
/* A dmabuf-backed buffer plus the fds needed to manage and CPU-map it. */
struct memory_buffer {
	int fd;		/* dmabuf fd (from UDMABUF_CREATE) */
	size_t size;	/* buffer size in bytes */

	int devfd;	/* /dev/udmabuf device fd */
	int memfd;	/* memfd backing the udmabuf */
	char *buf_mem;	/* CPU mapping of the dmabuf */
};

/*
 * Pluggable allocator/copy ops so a different dmabuf provider could be
 * swapped in; udmabuf is the only implementation in this file.
 */
struct memory_provider {
	struct memory_buffer *(*alloc)(size_t size);
	void (*free)(struct memory_buffer *ctx);
	void (*memcpy_to_device)(struct memory_buffer *dst, size_t off,
				 void *src, int n);
	void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
				   size_t off, int n);
};
122 
123 static void pr_err(const char *fmt, ...)
124 {
125 	va_list args;
126 
127 	fprintf(stderr, "%s: ", TEST_PREFIX);
128 
129 	va_start(args, fmt);
130 	vfprintf(stderr, fmt, args);
131 	va_end(args);
132 
133 	if (errno != 0)
134 		fprintf(stderr, ": %s", strerror(errno));
135 	fprintf(stderr, "\n");
136 }
137 
138 static struct memory_buffer *udmabuf_alloc(size_t size)
139 {
140 	struct udmabuf_create create;
141 	struct memory_buffer *ctx;
142 	int ret;
143 
144 	ctx = malloc(sizeof(*ctx));
145 	if (!ctx)
146 		return NULL;
147 
148 	ctx->size = size;
149 
150 	ctx->devfd = open("/dev/udmabuf", O_RDWR);
151 	if (ctx->devfd < 0) {
152 		pr_err("[skip,no-udmabuf: Unable to access DMA buffer device file]");
153 		goto err_free_ctx;
154 	}
155 
156 	ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
157 	if (ctx->memfd < 0) {
158 		pr_err("[skip,no-memfd]");
159 		goto err_close_dev;
160 	}
161 
162 	ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
163 	if (ret < 0) {
164 		pr_err("[skip,fcntl-add-seals]");
165 		goto err_close_memfd;
166 	}
167 
168 	ret = ftruncate(ctx->memfd, size);
169 	if (ret == -1) {
170 		pr_err("[FAIL,memfd-truncate]");
171 		goto err_close_memfd;
172 	}
173 
174 	memset(&create, 0, sizeof(create));
175 
176 	create.memfd = ctx->memfd;
177 	create.offset = 0;
178 	create.size = size;
179 	ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
180 	if (ctx->fd < 0) {
181 		pr_err("[FAIL, create udmabuf]");
182 		goto err_close_fd;
183 	}
184 
185 	ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
186 			    ctx->fd, 0);
187 	if (ctx->buf_mem == MAP_FAILED) {
188 		pr_err("[FAIL, map udmabuf]");
189 		goto err_close_fd;
190 	}
191 
192 	return ctx;
193 
194 err_close_fd:
195 	close(ctx->fd);
196 err_close_memfd:
197 	close(ctx->memfd);
198 err_close_dev:
199 	close(ctx->devfd);
200 err_free_ctx:
201 	free(ctx);
202 	return NULL;
203 }
204 
205 static void udmabuf_free(struct memory_buffer *ctx)
206 {
207 	munmap(ctx->buf_mem, ctx->size);
208 	close(ctx->fd);
209 	close(ctx->memfd);
210 	close(ctx->devfd);
211 	free(ctx);
212 }
213 
214 static void udmabuf_memcpy_to_device(struct memory_buffer *dst, size_t off,
215 				     void *src, int n)
216 {
217 	struct dma_buf_sync sync = {};
218 
219 	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
220 	ioctl(dst->fd, DMA_BUF_IOCTL_SYNC, &sync);
221 
222 	memcpy(dst->buf_mem + off, src, n);
223 
224 	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
225 	ioctl(dst->fd, DMA_BUF_IOCTL_SYNC, &sync);
226 }
227 
228 static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src,
229 				       size_t off, int n)
230 {
231 	struct dma_buf_sync sync = {};
232 
233 	sync.flags = DMA_BUF_SYNC_START;
234 	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
235 
236 	memcpy(dst, src->buf_mem + off, n);
237 
238 	sync.flags = DMA_BUF_SYNC_END;
239 	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
240 }
241 
/* The only provider implemented here: udmabuf-backed buffers. */
static struct memory_provider udmabuf_memory_provider = {
	.alloc = udmabuf_alloc,
	.free = udmabuf_free,
	.memcpy_to_device = udmabuf_memcpy_to_device,
	.memcpy_from_device = udmabuf_memcpy_from_device,
};

/* Active provider; everything below goes through this indirection. */
static struct memory_provider *provider = &udmabuf_memory_provider;
250 
/*
 * Write @size bytes from @ptr to stdout.
 *
 * NOTE(review): despite the name, this emits every byte, zero or not —
 * it is the "netcat passthrough" output path for non-validating runs.
 *
 * Fix vs. original: the index was `unsigned int` compared against a
 * size_t bound, which loops forever if size > UINT_MAX; use size_t.
 */
static void print_nonzero_bytes(void *ptr, size_t size)
{
	unsigned char *p = ptr;
	size_t i;

	for (i = 0; i < size; i++)
		putchar(p[i]);
}
259 
260 int validate_buffer(void *line, size_t size)
261 {
262 	static unsigned char seed = 1;
263 	unsigned char *ptr = line;
264 	unsigned char expected;
265 	static int errors;
266 	size_t i;
267 
268 	for (i = 0; i < size; i++) {
269 		expected = seed ? seed : '\n';
270 		if (ptr[i] != expected) {
271 			fprintf(stderr,
272 				"Failed validation: expected=%u, actual=%u, index=%lu\n",
273 				expected, ptr[i], i);
274 			errors++;
275 			if (errors > 20) {
276 				pr_err("validation failed");
277 				return -1;
278 			}
279 		}
280 		seed++;
281 		if (seed == do_validation)
282 			seed = 0;
283 	}
284 
285 	fprintf(stdout, "Validated buffer\n");
286 	return 0;
287 }
288 
/*
 * Format @cmd with @args, run it via popen(), and optionally capture the
 * first line of its stdout into @out (newline stripped).
 *
 * Returns the pclose() status on success, -1 on popen/read failure.
 *
 * Fix vs. original: when fgets() failed we returned without pclose(),
 * leaking the stream (and the child's pipe fd).
 */
static int
__run_command(char *out, size_t outlen, const char *cmd, va_list args)
{
	char command[256];
	FILE *fp;

	vsnprintf(command, sizeof(command), cmd, args);

	fprintf(stderr, "Running: %s\n", command);
	fp = popen(command, "r");
	if (!fp)
		return -1;
	if (out) {
		size_t len;

		if (!fgets(out, outlen, fp)) {
			pclose(fp);
			return -1;
		}

		/* Remove trailing newline if present */
		len = strlen(out);
		if (len && out[len - 1] == '\n')
			out[len - 1] = '\0';
	}
	return pclose(fp);
}
314 
315 static int run_command(const char *cmd, ...)
316 {
317 	va_list args;
318 	int ret;
319 
320 	va_start(args, cmd);
321 	ret = __run_command(NULL, 0, cmd, args);
322 	va_end(args);
323 
324 	return ret;
325 }
326 
/*
 * Install an ethtool ntuple flow-steering rule on @ifname.
 *
 * @format (plus varargs) is the rule body appended to "ethtool -N <if>".
 * The rule id is parsed out of ethtool's "Added rule with ID N" output,
 * recorded in ntuple_ids[] so reset_flow_steering() can delete it later,
 * and returned. Returns -1 if the flow table is full, the command fails,
 * or the id cannot be parsed.
 */
static int ethtool_add_flow(const char *format, ...)
{
	char local_output[256], cmd[256];
	const char *id_start;
	int flow_idx, ret;
	char *endptr;
	long flow_id;
	va_list args;

	/* Find a free tracking slot before spending a command invocation. */
	for (flow_idx = 0; flow_idx < MAX_FLOWS; flow_idx++)
		if (ntuple_ids[flow_idx] == -1)
			break;
	if (flow_idx == MAX_FLOWS) {
		fprintf(stderr, "Error: too many flows\n");
		return -1;
	}

	snprintf(cmd, sizeof(cmd), "ethtool -N %s %s", ifname, format);

	va_start(args, format);
	ret = __run_command(local_output, sizeof(local_output), cmd, args);
	va_end(args);

	if (ret != 0)
		return ret;

	/* Extract the ID from the output */
	id_start = strstr(local_output, "Added rule with ID ");
	if (!id_start)
		return -1;
	id_start += strlen("Added rule with ID ");

	flow_id = strtol(id_start, &endptr, 10);
	if (endptr == id_start || flow_id < 0 || flow_id > INT_MAX)
		return -1;

	fprintf(stderr, "Added flow rule with ID %ld\n", flow_id);
	ntuple_ids[flow_idx] = flow_id;
	return flow_id;
}
367 
/*
 * Query the device's RX queue count (rx + combined channels) over the
 * ethtool netlink family. Returns -1 on any YNL failure.
 *
 * NOTE(review): the parameter shadows the file-scope `ifindex` global;
 * callers presumably pass the same value — verify at call sites.
 */
static int rxq_num(int ifindex)
{
	struct ethtool_channels_get_req *req;
	struct ethtool_channels_get_rsp *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int num = -1;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = ethtool_channels_get_req_alloc();
	ethtool_channels_get_req_set_header_dev_index(req, ifindex);
	rsp = ethtool_channels_get(ys, req);
	if (rsp)
		num = rsp->rx_count + rsp->combined_count;
	ethtool_channels_get_req_free(req);
	/* rsp may be NULL here; assumed safe for the free helper — TODO confirm. */
	ethtool_channels_get_rsp_free(rsp);

	ynl_sock_destroy(ys);

	return num;
}
394 
395 static void reset_flow_steering(void)
396 {
397 	int i;
398 
399 	for (i = 0; i < MAX_FLOWS; i++) {
400 		if (ntuple_ids[i] == -1)
401 			continue;
402 		run_command("ethtool -N %s delete %d",
403 			    ifname, ntuple_ids[i]);
404 		ntuple_ids[i] = -1;
405 	}
406 }
407 
/*
 * Map an ETHTOOL_TCP_DATA_SPLIT_* value to a human-readable name for
 * logging; unknown values map to "?".
 */
static const char *tcp_data_split_str(int val)
{
	static const char * const names[] = { "off", "auto", "on" };

	if (val < 0 || val > 2)
		return "?";
	return names[val];
}
421 
/*
 * Snapshot the device's current ring configuration (used to restore the
 * header-split settings on exit). Returns a response the caller must
 * free with ethtool_rings_get_rsp_free(), or NULL on failure.
 */
static struct ethtool_rings_get_rsp *get_ring_config(void)
{
	struct ethtool_rings_get_req *get_req;
	struct ethtool_rings_get_rsp *get_rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return NULL;
	}

	get_req = ethtool_rings_get_req_alloc();
	ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
	get_rsp = ethtool_rings_get(ys, get_req);
	ethtool_rings_get_req_free(get_req);

	ynl_sock_destroy(ys);

	return get_rsp;
}
444 
445 static void restore_ring_config(const struct ethtool_rings_get_rsp *config)
446 {
447 	struct ethtool_rings_get_req *get_req;
448 	struct ethtool_rings_get_rsp *get_rsp;
449 	struct ethtool_rings_set_req *req;
450 	struct ynl_error yerr;
451 	struct ynl_sock *ys;
452 	int ret;
453 
454 	if (!config)
455 		return;
456 
457 	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
458 	if (!ys) {
459 		fprintf(stderr, "YNL: %s\n", yerr.msg);
460 		return;
461 	}
462 
463 	req = ethtool_rings_set_req_alloc();
464 	ethtool_rings_set_req_set_header_dev_index(req, ifindex);
465 	ethtool_rings_set_req_set_tcp_data_split(req,
466 						ETHTOOL_TCP_DATA_SPLIT_UNKNOWN);
467 	if (config->_present.hds_thresh)
468 		ethtool_rings_set_req_set_hds_thresh(req, config->hds_thresh);
469 
470 	ret = ethtool_rings_set(ys, req);
471 	if (ret < 0)
472 		fprintf(stderr, "YNL restoring HDS cfg: %s\n", ys->err.msg);
473 
474 	get_req = ethtool_rings_get_req_alloc();
475 	ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
476 	get_rsp = ethtool_rings_get(ys, get_req);
477 	ethtool_rings_get_req_free(get_req);
478 
479 	/* use explicit value if UKNOWN didn't give us the previous */
480 	if (get_rsp->tcp_data_split != config->tcp_data_split) {
481 		ethtool_rings_set_req_set_tcp_data_split(req,
482 							config->tcp_data_split);
483 		ret = ethtool_rings_set(ys, req);
484 		if (ret < 0)
485 			fprintf(stderr, "YNL restoring expl HDS cfg: %s\n",
486 				ys->err.msg);
487 	}
488 
489 	ethtool_rings_get_rsp_free(get_rsp);
490 	ethtool_rings_set_req_free(req);
491 
492 	ynl_sock_destroy(ys);
493 }
494 
/*
 * Enable or disable TCP header split (HDS) on the device.
 *
 * @old: previously captured ring config, used only to know whether the
 *       device exposes hds_thresh (forced to 0 when enabling so all
 *       payload lands in the dmabuf).
 * @on:  true -> ENABLED (+thresh 0), false -> UNKNOWN (driver default).
 *
 * Returns the ethtool_rings_set() result; on success also logs the
 * state read back from the device. Returns -1 if the YNL socket cannot
 * be created.
 */
static int
configure_headersplit(const struct ethtool_rings_get_rsp *old, bool on)
{
	struct ethtool_rings_get_req *get_req;
	struct ethtool_rings_get_rsp *get_rsp;
	struct ethtool_rings_set_req *req;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = ethtool_rings_set_req_alloc();
	ethtool_rings_set_req_set_header_dev_index(req, ifindex);
	if (on) {
		ethtool_rings_set_req_set_tcp_data_split(req,
						ETHTOOL_TCP_DATA_SPLIT_ENABLED);
		if (old->_present.hds_thresh)
			ethtool_rings_set_req_set_hds_thresh(req, 0);
	} else {
		ethtool_rings_set_req_set_tcp_data_split(req,
						ETHTOOL_TCP_DATA_SPLIT_UNKNOWN);
	}
	ret = ethtool_rings_set(ys, req);
	if (ret < 0)
		fprintf(stderr, "YNL failed: %s\n", ys->err.msg);
	ethtool_rings_set_req_free(req);

	if (ret == 0) {
		/* Read back and report what the device actually applied. */
		get_req = ethtool_rings_get_req_alloc();
		ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
		get_rsp = ethtool_rings_get(ys, get_req);
		ethtool_rings_get_req_free(get_req);
		if (get_rsp)
			fprintf(stderr, "TCP header split: %s\n",
				tcp_data_split_str(get_rsp->tcp_data_split));
		ethtool_rings_get_rsp_free(get_rsp);
	}

	ynl_sock_destroy(ys);

	return ret;
}
542 
543 static int configure_rss(void)
544 {
545 	return run_command("ethtool -X %s equal %d >&2", ifname, start_queue);
546 }
547 
548 static void reset_rss(void)
549 {
550 	run_command("ethtool -X %s default >&2", ifname, start_queue);
551 }
552 
/*
 * Negative test helper: try to set the channel counts to rx/tx, which
 * the caller expects the kernel to REJECT (a bound queue may not be
 * deactivated).
 *
 * Returns non-zero (the set failure) on the expected path. If the set
 * unexpectedly succeeds, the previous counts are restored and the
 * restore's result is returned (0 on a clean revert) — so a 0 return
 * means "the kernel allowed the change", which callers treat as a bug.
 */
static int check_changing_channels(unsigned int rx, unsigned int tx)
{
	struct ethtool_channels_get_req *gchan;
	struct ethtool_channels_set_req *schan;
	struct ethtool_channels_get_rsp *chan;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	fprintf(stderr, "setting channel count rx:%u tx:%u\n", rx, tx);

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	/* Read the current counts first so we can revert if needed. */
	gchan = ethtool_channels_get_req_alloc();
	if (!gchan) {
		ret = -1;
		goto exit_close_sock;
	}

	ethtool_channels_get_req_set_header_dev_index(gchan, ifindex);
	chan = ethtool_channels_get(ys, gchan);
	ethtool_channels_get_req_free(gchan);
	if (!chan) {
		fprintf(stderr, "YNL get channels: %s\n", ys->err.msg);
		ret = -1;
		goto exit_close_sock;
	}

	schan =	ethtool_channels_set_req_alloc();
	if (!schan) {
		ret = -1;
		goto exit_free_chan;
	}

	ethtool_channels_set_req_set_header_dev_index(schan, ifindex);

	/*
	 * Express rx/tx in whichever scheme the device uses: combined
	 * channels (splitting any remainder into dedicated rx or tx) or
	 * separate rx/tx counts.
	 */
	if (chan->_present.combined_count) {
		if (chan->_present.rx_count || chan->_present.tx_count) {
			ethtool_channels_set_req_set_rx_count(schan, 0);
			ethtool_channels_set_req_set_tx_count(schan, 0);
		}

		if (rx == tx) {
			ethtool_channels_set_req_set_combined_count(schan, rx);
		} else if (rx > tx) {
			ethtool_channels_set_req_set_combined_count(schan, tx);
			ethtool_channels_set_req_set_rx_count(schan, rx - tx);
		} else {
			ethtool_channels_set_req_set_combined_count(schan, rx);
			ethtool_channels_set_req_set_tx_count(schan, tx - rx);
		}

	} else if (chan->_present.rx_count) {
		ethtool_channels_set_req_set_rx_count(schan, rx);
		ethtool_channels_set_req_set_tx_count(schan, tx);
	} else {
		fprintf(stderr, "Error: device has neither combined nor rx channels\n");
		ret = -1;
		goto exit_free_schan;
	}

	ret = ethtool_channels_set(ys, schan);
	if (ret) {
		fprintf(stderr, "YNL set channels: %s\n", ys->err.msg);
	} else {
		/* We were expecting a failure, go back to previous settings */
		ethtool_channels_set_req_set_combined_count(schan,
							    chan->combined_count);
		ethtool_channels_set_req_set_rx_count(schan, chan->rx_count);
		ethtool_channels_set_req_set_tx_count(schan, chan->tx_count);

		ret = ethtool_channels_set(ys, schan);
		if (ret)
			fprintf(stderr, "YNL un-setting channels: %s\n",
				ys->err.msg);
	}

exit_free_schan:
	ethtool_channels_set_req_free(schan);
exit_free_chan:
	ethtool_channels_get_rsp_free(chan);
exit_close_sock:
	ynl_sock_destroy(ys);

	return ret;
}
643 
/*
 * Steer the test flow to start_queue with an ethtool ntuple rule.
 *
 * Tries a 5-tuple rule first (src/dst ip + ports) when client_ip is
 * known, falling back to a 3-tuple rule (dst ip + port). Handles
 * v4-mapped IPv6 addresses by switching to tcp4 and stripping the
 * "::ffff:" prefix from the textual form.
 *
 * Returns 0 on success, -1 if neither rule could be installed.
 */
static int configure_flow_steering(struct sockaddr_in6 *server_sin)
{
	const char *type = "tcp6";
	const char *server_addr;
	char buf[40];
	int flow_id;

	inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
	server_addr = buf;

	if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
		type = "tcp4";
		/* "::ffff:a.b.c.d" -> "a.b.c.d" */
		server_addr = strrchr(server_addr, ':') + 1;
	}

	/* Try configure 5-tuple */
	flow_id = ethtool_add_flow("flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d",
				   type,
				   client_ip ? "src-ip" : "",
				   client_ip ?: "",
				   server_addr,
				   client_ip ? "src-port" : "",
				   client_ip ? port : "",
				   port, start_queue);
	if (flow_id < 0) {
		/* If that fails, try configure 3-tuple */
		flow_id = ethtool_add_flow("flow-type %s dst-ip %s dst-port %s queue %d",
					   type, server_addr, port, start_queue);
		if (flow_id < 0)
			/* If that fails, return error */
			return -1;
	}

	return 0;
}
679 
/*
 * Bind @dmabuf_fd to @n_queue_index RX queues of @ifindex via the
 * netdev netlink family. On success stores the kernel-assigned id in
 * the dmabuf_id global and leaves *ys open — the binding lives as long
 * as the socket, so the caller must keep it and destroy it to unbind.
 *
 * Takes ownership of @queues (freed here on socket-creation failure;
 * otherwise presumably absorbed by the request — per YNL convention,
 * TODO confirm). Returns 0 on success, -1 on failure (socket destroyed).
 */
static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
			 struct netdev_queue_id *queues,
			 unsigned int n_queue_index, struct ynl_sock **ys)
{
	struct netdev_bind_rx_req *req = NULL;
	struct netdev_bind_rx_rsp *rsp = NULL;
	struct ynl_error yerr;

	*ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!*ys) {
		netdev_queue_id_free(queues);
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = netdev_bind_rx_req_alloc();
	netdev_bind_rx_req_set_ifindex(req, ifindex);
	netdev_bind_rx_req_set_fd(req, dmabuf_fd);
	__netdev_bind_rx_req_set_queues(req, queues, n_queue_index);

	rsp = netdev_bind_rx(*ys, req);
	if (!rsp) {
		perror("netdev_bind_rx");
		goto err_close;
	}

	if (!rsp->_present.id) {
		perror("id not present");
		goto err_close;
	}

	fprintf(stderr, "got dmabuf id=%d\n", rsp->id);
	dmabuf_id = rsp->id;

	netdev_bind_rx_req_free(req);
	netdev_bind_rx_rsp_free(rsp);

	return 0;

err_close:
	fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
	netdev_bind_rx_req_free(req);
	ynl_sock_destroy(*ys);
	return -1;
}
725 
/*
 * Bind @dmabuf_fd for TX on @ifindex via the netdev netlink family.
 * Mirrors bind_rx_queue() but needs no queue list; the id goes into the
 * tx_dmabuf_id global and *ys stays open to keep the binding alive.
 *
 * Returns 0 on success, -1 on failure (socket destroyed).
 */
static int bind_tx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
			 struct ynl_sock **ys)
{
	struct netdev_bind_tx_req *req = NULL;
	struct netdev_bind_tx_rsp *rsp = NULL;
	struct ynl_error yerr;

	*ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!*ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = netdev_bind_tx_req_alloc();
	netdev_bind_tx_req_set_ifindex(req, ifindex);
	netdev_bind_tx_req_set_fd(req, dmabuf_fd);

	rsp = netdev_bind_tx(*ys, req);
	if (!rsp) {
		perror("netdev_bind_tx");
		goto err_close;
	}

	if (!rsp->_present.id) {
		perror("id not present");
		goto err_close;
	}

	fprintf(stderr, "got tx dmabuf id=%d\n", rsp->id);
	tx_dmabuf_id = rsp->id;

	netdev_bind_tx_req_free(req);
	netdev_bind_tx_rsp_free(rsp);

	return 0;

err_close:
	fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
	netdev_bind_tx_req_free(req);
	ynl_sock_destroy(*ys);
	return -1;
}
768 
769 static int enable_reuseaddr(int fd)
770 {
771 	int opt = 1;
772 	int ret;
773 
774 	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
775 	if (ret) {
776 		pr_err("SO_REUSEPORT failed");
777 		return -1;
778 	}
779 
780 	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
781 	if (ret) {
782 		pr_err("SO_REUSEADDR failed");
783 		return -1;
784 	}
785 
786 	return 0;
787 }
788 
789 static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
790 {
791 	int ret;
792 
793 	sin6->sin6_family = AF_INET6;
794 	sin6->sin6_port = htons(port);
795 
796 	ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr);
797 	if (ret != 1) {
798 		/* fallback to plain IPv4 */
799 		ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]);
800 		if (ret != 1)
801 			return -1;
802 
803 		/* add ::ffff prefix */
804 		sin6->sin6_addr.s6_addr32[0] = 0;
805 		sin6->sin6_addr.s6_addr32[1] = 0;
806 		sin6->sin6_addr.s6_addr16[4] = 0;
807 		sin6->sin6_addr.s6_addr16[5] = 0xffff;
808 	}
809 
810 	return 0;
811 }
812 
813 static struct netdev_queue_id *create_queues(void)
814 {
815 	struct netdev_queue_id *queues;
816 	size_t i = 0;
817 
818 	queues = netdev_queue_id_alloc(num_queues);
819 	for (i = 0; i < num_queues; i++) {
820 		netdev_queue_id_set_type(&queues[i], NETDEV_QUEUE_TYPE_RX);
821 		netdev_queue_id_set_id(&queues[i], start_queue + i);
822 	}
823 
824 	return queues;
825 }
826 
827 static int do_server(struct memory_buffer *mem)
828 {
829 	struct ethtool_rings_get_rsp *ring_config;
830 	char ctrl_data[sizeof(int) * 20000];
831 	size_t non_page_aligned_frags = 0;
832 	struct sockaddr_in6 client_addr;
833 	struct sockaddr_in6 server_sin;
834 	size_t page_aligned_frags = 0;
835 	size_t total_received = 0;
836 	socklen_t client_addr_len;
837 	bool is_devmem = false;
838 	char *tmp_mem = NULL;
839 	struct ynl_sock *ys;
840 	char iobuf[819200];
841 	int ret, err = -1;
842 	char buffer[256];
843 	int socket_fd;
844 	int client_fd;
845 
846 	ret = parse_address(server_ip, atoi(port), &server_sin);
847 	if (ret < 0) {
848 		pr_err("parse server address");
849 		return -1;
850 	}
851 
852 	ring_config = get_ring_config();
853 	if (!ring_config) {
854 		pr_err("Failed to get current ring configuration");
855 		return -1;
856 	}
857 
858 	if (configure_headersplit(ring_config, 1)) {
859 		pr_err("Failed to enable TCP header split");
860 		goto err_free_ring_config;
861 	}
862 
863 	/* Configure RSS to divert all traffic from our devmem queues */
864 	if (configure_rss()) {
865 		pr_err("Failed to configure rss");
866 		goto err_reset_headersplit;
867 	}
868 
869 	/* Flow steer our devmem flows to start_queue */
870 	if (configure_flow_steering(&server_sin)) {
871 		pr_err("Failed to configure flow steering");
872 		goto err_reset_rss;
873 	}
874 
875 	if (bind_rx_queue(ifindex, mem->fd, create_queues(), num_queues, &ys)) {
876 		pr_err("Failed to bind");
877 		goto err_reset_flow_steering;
878 	}
879 
880 	tmp_mem = malloc(mem->size);
881 	if (!tmp_mem)
882 		goto err_unbind;
883 
884 	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
885 	if (socket_fd < 0) {
886 		pr_err("Failed to create socket");
887 		goto err_free_tmp;
888 	}
889 
890 	if (enable_reuseaddr(socket_fd))
891 		goto err_close_socket;
892 
893 	fprintf(stderr, "binding to address %s:%d\n", server_ip,
894 		ntohs(server_sin.sin6_port));
895 
896 	ret = bind(socket_fd, &server_sin, sizeof(server_sin));
897 	if (ret) {
898 		pr_err("Failed to bind");
899 		goto err_close_socket;
900 	}
901 
902 	ret = listen(socket_fd, 1);
903 	if (ret) {
904 		pr_err("Failed to listen");
905 		goto err_close_socket;
906 	}
907 
908 	client_addr_len = sizeof(client_addr);
909 
910 	inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
911 		  sizeof(buffer));
912 	fprintf(stderr, "Waiting or connection on %s:%d\n", buffer,
913 		ntohs(server_sin.sin6_port));
914 	client_fd = accept(socket_fd, &client_addr, &client_addr_len);
915 	if (client_fd < 0) {
916 		pr_err("Failed to accept");
917 		goto err_close_socket;
918 	}
919 
920 	inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
921 		  sizeof(buffer));
922 	fprintf(stderr, "Got connection from %s:%d\n", buffer,
923 		ntohs(client_addr.sin6_port));
924 
925 	while (1) {
926 		struct iovec iov = { .iov_base = iobuf,
927 				     .iov_len = sizeof(iobuf) };
928 		struct dmabuf_cmsg *dmabuf_cmsg = NULL;
929 		struct cmsghdr *cm = NULL;
930 		struct msghdr msg = { 0 };
931 		struct dmabuf_token token;
932 		ssize_t ret;
933 
934 		is_devmem = false;
935 
936 		msg.msg_iov = &iov;
937 		msg.msg_iovlen = 1;
938 		msg.msg_control = ctrl_data;
939 		msg.msg_controllen = sizeof(ctrl_data);
940 		ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
941 		fprintf(stderr, "recvmsg ret=%ld\n", ret);
942 		if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
943 			continue;
944 		if (ret < 0) {
945 			perror("recvmsg");
946 			if (errno == EFAULT) {
947 				pr_err("received EFAULT, won't recover");
948 				goto err_close_client;
949 			}
950 			continue;
951 		}
952 		if (ret == 0) {
953 			errno = 0;
954 			pr_err("client exited");
955 			goto cleanup;
956 		}
957 
958 		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
959 			if (cm->cmsg_level != SOL_SOCKET ||
960 			    (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
961 			     cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
962 				fprintf(stderr, "skipping non-devmem cmsg\n");
963 				continue;
964 			}
965 
966 			dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
967 			is_devmem = true;
968 
969 			if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
970 				/* TODO: process data copied from skb's linear
971 				 * buffer.
972 				 */
973 				fprintf(stderr,
974 					"SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
975 					dmabuf_cmsg->frag_size);
976 
977 				continue;
978 			}
979 
980 			token.token_start = dmabuf_cmsg->frag_token;
981 			token.token_count = 1;
982 
983 			total_received += dmabuf_cmsg->frag_size;
984 			fprintf(stderr,
985 				"received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
986 				dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
987 				dmabuf_cmsg->frag_offset % getpagesize(),
988 				dmabuf_cmsg->frag_offset,
989 				dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
990 				total_received, dmabuf_cmsg->dmabuf_id);
991 
992 			if (dmabuf_cmsg->dmabuf_id != dmabuf_id) {
993 				pr_err("received on wrong dmabuf_id: flow steering error");
994 				goto err_close_client;
995 			}
996 
997 			if (dmabuf_cmsg->frag_size % getpagesize())
998 				non_page_aligned_frags++;
999 			else
1000 				page_aligned_frags++;
1001 
1002 			provider->memcpy_from_device(tmp_mem, mem,
1003 						     dmabuf_cmsg->frag_offset,
1004 						     dmabuf_cmsg->frag_size);
1005 
1006 			if (do_validation) {
1007 				if (validate_buffer(tmp_mem,
1008 						    dmabuf_cmsg->frag_size))
1009 					goto err_close_client;
1010 			} else {
1011 				print_nonzero_bytes(tmp_mem,
1012 						    dmabuf_cmsg->frag_size);
1013 			}
1014 
1015 			ret = setsockopt(client_fd, SOL_SOCKET,
1016 					 SO_DEVMEM_DONTNEED, &token,
1017 					 sizeof(token));
1018 			if (ret != 1) {
1019 				pr_err("SO_DEVMEM_DONTNEED not enough tokens");
1020 				goto err_close_client;
1021 			}
1022 		}
1023 		if (!is_devmem) {
1024 			pr_err("flow steering error");
1025 			goto err_close_client;
1026 		}
1027 
1028 		fprintf(stderr, "total_received=%lu\n", total_received);
1029 	}
1030 
1031 	fprintf(stderr, "%s: ok\n", TEST_PREFIX);
1032 
1033 	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
1034 		page_aligned_frags, non_page_aligned_frags);
1035 
1036 cleanup:
1037 	err = 0;
1038 
1039 err_close_client:
1040 	close(client_fd);
1041 err_close_socket:
1042 	close(socket_fd);
1043 err_free_tmp:
1044 	free(tmp_mem);
1045 err_unbind:
1046 	ynl_sock_destroy(ys);
1047 err_reset_flow_steering:
1048 	reset_flow_steering();
1049 err_reset_rss:
1050 	reset_rss();
1051 err_reset_headersplit:
1052 	restore_ring_config(ring_config);
1053 err_free_ring_config:
1054 	ethtool_rings_get_rsp_free(ring_config);
1055 	return err;
1056 }
1057 
/*
 * Sanity-test the devmem RX binding API:
 *  1. binding an empty queue list must fail;
 *  2. binding with header split disabled must fail;
 *  3. binding with header split enabled must succeed;
 *  4. deactivating a bound queue (channel shrink) must fail.
 *
 * Returns 0 if all expectations hold, -1 otherwise. Note that when a
 * bind *unexpectedly* succeeds we jump to err_unbind so the resulting
 * socket/binding is destroyed; when it fails as expected the socket
 * was already destroyed inside bind_rx_queue().
 */
int run_devmem_tests(void)
{
	struct ethtool_rings_get_rsp *ring_config;
	struct netdev_queue_id *queues;
	struct memory_buffer *mem;
	struct ynl_sock *ys;
	int err = -1;

	mem = provider->alloc(getpagesize() * NUM_PAGES);
	if (!mem) {
		pr_err("Failed to allocate memory buffer");
		return -1;
	}

	ring_config = get_ring_config();
	if (!ring_config) {
		pr_err("Failed to get current ring configuration");
		goto err_free_mem;
	}

	/* Configure RSS to divert all traffic from our devmem queues */
	if (configure_rss()) {
		pr_err("rss error");
		goto err_free_ring_config;
	}

	if (configure_headersplit(ring_config, 1)) {
		pr_err("Failed to configure header split");
		goto err_reset_rss;
	}

	/* Case 1: an empty (zero-initialized) queue array must be rejected. */
	queues = netdev_queue_id_alloc(num_queues);
	if (!queues) {
		pr_err("Failed to allocate empty queues array");
		goto err_reset_headersplit;
	}

	if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
		pr_err("Binding empty queues array should have failed");
		goto err_unbind;
	}

	/* Case 2: binding must be rejected while header split is off. */
	if (configure_headersplit(ring_config, 0)) {
		pr_err("Failed to configure header split");
		goto err_reset_headersplit;
	}

	queues = create_queues();
	if (!queues) {
		pr_err("Failed to create queues");
		goto err_reset_headersplit;
	}

	if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
		pr_err("Configure dmabuf with header split off should have failed");
		goto err_unbind;
	}

	/* Case 3: with header split back on, binding must succeed. */
	if (configure_headersplit(ring_config, 1)) {
		pr_err("Failed to configure header split");
		goto err_reset_headersplit;
	}

	queues = create_queues();
	if (!queues) {
		pr_err("Failed to create queues");
		goto err_reset_headersplit;
	}

	if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys)) {
		pr_err("Failed to bind");
		goto err_reset_headersplit;
	}

	/* Deactivating a bound queue should not be legal */
	if (!check_changing_channels(num_queues, num_queues)) {
		pr_err("Deactivating a bound queue should be illegal");
		goto err_unbind;
	}

	err = 0;
	goto err_unbind;

err_unbind:
	ynl_sock_destroy(ys);
err_reset_headersplit:
	restore_ring_config(ring_config);
err_reset_rss:
	reset_rss();
err_free_ring_config:
	ethtool_rings_get_rsp_free(ring_config);
err_free_mem:
	provider->free(mem);
	return err;
}
1153 
/* Current wall-clock time in milliseconds (for completion deadlines). */
static uint64_t gettimeofday_ms(void)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return now.tv_sec * 1000ULL + now.tv_usec / 1000ULL;
}
1161 
1162 static int do_poll(int fd)
1163 {
1164 	struct pollfd pfd;
1165 	int ret;
1166 
1167 	pfd.revents = 0;
1168 	pfd.fd = fd;
1169 
1170 	ret = poll(&pfd, 1, waittime_ms);
1171 	if (ret == -1) {
1172 		pr_err("poll");
1173 		return -1;
1174 	}
1175 
1176 	return ret && (pfd.revents & POLLERR);
1177 }
1178 
1179 static int wait_compl(int fd)
1180 {
1181 	int64_t tstop = gettimeofday_ms() + waittime_ms;
1182 	char control[CMSG_SPACE(100)] = {};
1183 	struct sock_extended_err *serr;
1184 	struct msghdr msg = {};
1185 	struct cmsghdr *cm;
1186 	__u32 hi, lo;
1187 	int ret;
1188 
1189 	msg.msg_control = control;
1190 	msg.msg_controllen = sizeof(control);
1191 
1192 	while (gettimeofday_ms() < tstop) {
1193 		ret = do_poll(fd);
1194 		if (ret < 0)
1195 			return ret;
1196 		if (!ret)
1197 			continue;
1198 
1199 		ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
1200 		if (ret < 0) {
1201 			if (errno == EAGAIN)
1202 				continue;
1203 			pr_err("recvmsg(MSG_ERRQUEUE)");
1204 			return -1;
1205 		}
1206 		if (msg.msg_flags & MSG_CTRUNC) {
1207 			pr_err("MSG_CTRUNC");
1208 			return -1;
1209 		}
1210 
1211 		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
1212 			if (cm->cmsg_level != SOL_IP &&
1213 			    cm->cmsg_level != SOL_IPV6)
1214 				continue;
1215 			if (cm->cmsg_level == SOL_IP &&
1216 			    cm->cmsg_type != IP_RECVERR)
1217 				continue;
1218 			if (cm->cmsg_level == SOL_IPV6 &&
1219 			    cm->cmsg_type != IPV6_RECVERR)
1220 				continue;
1221 
1222 			serr = (void *)CMSG_DATA(cm);
1223 			if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) {
1224 				pr_err("wrong origin %u", serr->ee_origin);
1225 				return -1;
1226 			}
1227 			if (serr->ee_errno != 0) {
1228 				pr_err("wrong errno %d", serr->ee_errno);
1229 				return -1;
1230 			}
1231 
1232 			hi = serr->ee_data;
1233 			lo = serr->ee_info;
1234 
1235 			fprintf(stderr, "tx complete [%d,%d]\n", lo, hi);
1236 			return 0;
1237 		}
1238 	}
1239 
1240 	pr_err("did not receive tx completion");
1241 	return -1;
1242 }
1243 
/* Devmem TCP TX client: read stdin one line at a time, stage each line in
 * the dmabuf via the provider, and send it with sendmsg(MSG_ZEROCOPY) plus
 * an SCM_DEVMEM_DMABUF cmsg so the kernel sources the payload from device
 * memory.
 *
 * Returns 0 once all of stdin has been sent and every send received a tx
 * completion; -1 on any setup, send, or completion failure.
 */
static int do_client(struct memory_buffer *mem)
{
	char ctrl_data[CMSG_SPACE(sizeof(__u32))];
	struct sockaddr_in6 server_sin;
	struct sockaddr_in6 client_sin;
	struct ynl_sock *ys = NULL;
	struct iovec iov[MAX_IOV];
	struct msghdr msg = {};
	ssize_t line_size = 0;
	struct cmsghdr *cmsg;
	char *line = NULL;
	int ret, err = -1;
	size_t len = 0;
	int socket_fd;
	__u32 ddmabuf;
	int opt = 1;

	ret = parse_address(server_ip, atoi(port), &server_sin);
	if (ret < 0) {
		pr_err("parse server address");
		return -1;
	}

	/* -c is optional for the client; when given, bind to that source
	 * address below.
	 */
	if (client_ip) {
		ret = parse_address(client_ip, atoi(port), &client_sin);
		if (ret < 0) {
			pr_err("parse client address");
			return ret;
		}
	}

	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (socket_fd < 0) {
		pr_err("create socket");
		return -1;
	}

	if (enable_reuseaddr(socket_fd))
		goto err_close_socket;

	/* Pin traffic to the device whose queues the dmabuf is bound to. */
	ret = setsockopt(socket_fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
			 strlen(ifname) + 1);
	if (ret) {
		pr_err("bindtodevice");
		goto err_close_socket;
	}

	/* Bind the dmabuf for TX; the ynl socket must stay open for the
	 * lifetime of the binding (destroyed in the cleanup path).
	 */
	if (bind_tx_queue(ifindex, mem->fd, &ys)) {
		pr_err("Failed to bind");
		goto err_close_socket;
	}

	if (client_ip) {
		ret = bind(socket_fd, &client_sin, sizeof(client_sin));
		if (ret) {
			pr_err("bind");
			goto err_unbind;
		}
	}

	/* SO_ZEROCOPY is required for sendmsg(MSG_ZEROCOPY) below. */
	ret = setsockopt(socket_fd, SOL_SOCKET, SO_ZEROCOPY, &opt, sizeof(opt));
	if (ret) {
		pr_err("set sock opt");
		goto err_unbind;
	}

	fprintf(stderr, "Connect to %s %d (via %s)\n", server_ip,
		ntohs(server_sin.sin6_port), ifname);

	ret = connect(socket_fd, &server_sin, sizeof(server_sin));
	if (ret) {
		pr_err("connect");
		goto err_unbind;
	}

	/* Main send loop: one sendmsg + completion wait per input line. */
	while (1) {
		free(line);
		line = NULL;
		line_size = getline(&line, &len, stdin);

		if (line_size < 0)
			break;

		/* -z: partition the line into max_chunk-sized iov entries
		 * to exercise multi-iov devmem TX.
		 */
		if (max_chunk) {
			msg.msg_iovlen =
				(line_size + max_chunk - 1) / max_chunk;
			if (msg.msg_iovlen > MAX_IOV) {
				pr_err("can't partition %zd bytes into maximum of %d chunks",
				       line_size, MAX_IOV);
				goto err_free_line;
			}

			/* For devmem TX, iov_base carries an offset into the
			 * bound dmabuf, not a virtual address.
			 */
			for (int i = 0; i < msg.msg_iovlen; i++) {
				iov[i].iov_base = (void *)(i * max_chunk);
				iov[i].iov_len = max_chunk;
			}

			/* Last chunk holds the remainder. */
			iov[msg.msg_iovlen - 1].iov_len =
				line_size - (msg.msg_iovlen - 1) * max_chunk;
		} else {
			iov[0].iov_base = 0;
			iov[0].iov_len = line_size;
			msg.msg_iovlen = 1;
		}

		msg.msg_iov = iov;
		/* Stage the payload at offset 0 of the dmabuf. */
		provider->memcpy_to_device(mem, 0, line, line_size);

		/* Attach the dmabuf id so the kernel pulls the payload from
		 * device memory rather than from user virtual addresses.
		 */
		msg.msg_control = ctrl_data;
		msg.msg_controllen = sizeof(ctrl_data);

		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_DEVMEM_DMABUF;
		cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));

		ddmabuf = tx_dmabuf_id;

		*((__u32 *)CMSG_DATA(cmsg)) = ddmabuf;

		ret = sendmsg(socket_fd, &msg, MSG_ZEROCOPY);
		if (ret < 0) {
			pr_err("Failed sendmsg");
			goto err_free_line;
		}

		fprintf(stderr, "sendmsg_ret=%d\n", ret);

		/* Partial sends are treated as failure for simplicity. */
		if (ret != line_size) {
			pr_err("Did not send all bytes %d vs %zd", ret, line_size);
			goto err_free_line;
		}

		/* MSG_ZEROCOPY: wait for the tx completion before reusing
		 * the dmabuf region for the next line.
		 */
		if (wait_compl(socket_fd))
			goto err_free_line;
	}

	fprintf(stderr, "%s: tx ok\n", TEST_PREFIX);

	err = 0;

err_free_line:
	free(line);
err_unbind:
	ynl_sock_destroy(ys);
err_close_socket:
	close(socket_fd);
	return err;
}
1393 
1394 int main(int argc, char *argv[])
1395 {
1396 	struct memory_buffer *mem;
1397 	int is_server = 0, opt;
1398 	int ret, err = 1;
1399 
1400 	while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:z:")) != -1) {
1401 		switch (opt) {
1402 		case 'l':
1403 			is_server = 1;
1404 			break;
1405 		case 's':
1406 			server_ip = optarg;
1407 			break;
1408 		case 'c':
1409 			client_ip = optarg;
1410 			break;
1411 		case 'p':
1412 			port = optarg;
1413 			break;
1414 		case 'v':
1415 			do_validation = atoll(optarg);
1416 			break;
1417 		case 'q':
1418 			num_queues = atoi(optarg);
1419 			break;
1420 		case 't':
1421 			start_queue = atoi(optarg);
1422 			break;
1423 		case 'f':
1424 			ifname = optarg;
1425 			break;
1426 		case 'z':
1427 			max_chunk = atoi(optarg);
1428 			break;
1429 		case '?':
1430 			fprintf(stderr, "unknown option: %c\n", optopt);
1431 			break;
1432 		}
1433 	}
1434 
1435 	if (!ifname) {
1436 		pr_err("Missing -f argument");
1437 		return 1;
1438 	}
1439 
1440 	ifindex = if_nametoindex(ifname);
1441 
1442 	fprintf(stderr, "using ifindex=%u\n", ifindex);
1443 
1444 	if (!server_ip && !client_ip) {
1445 		if (start_queue < 0 && num_queues < 0) {
1446 			num_queues = rxq_num(ifindex);
1447 			if (num_queues < 0) {
1448 				pr_err("couldn't detect number of queues");
1449 				return 1;
1450 			}
1451 			if (num_queues < 2) {
1452 				pr_err("number of device queues is too low");
1453 				return 1;
1454 			}
1455 			/* make sure can bind to multiple queues */
1456 			start_queue = num_queues / 2;
1457 			num_queues /= 2;
1458 		}
1459 
1460 		if (start_queue < 0 || num_queues < 0) {
1461 			pr_err("Both -t and -q are required");
1462 			return 1;
1463 		}
1464 
1465 		return run_devmem_tests();
1466 	}
1467 
1468 	if (start_queue < 0 && num_queues < 0) {
1469 		num_queues = rxq_num(ifindex);
1470 		if (num_queues < 2) {
1471 			pr_err("number of device queues is too low");
1472 			return 1;
1473 		}
1474 
1475 		num_queues = 1;
1476 		start_queue = rxq_num(ifindex) - num_queues;
1477 
1478 		if (start_queue < 0) {
1479 			pr_err("couldn't detect number of queues");
1480 			return 1;
1481 		}
1482 
1483 		fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
1484 	}
1485 
1486 	for (; optind < argc; optind++)
1487 		fprintf(stderr, "extra arguments: %s\n", argv[optind]);
1488 
1489 	if (start_queue < 0) {
1490 		pr_err("Missing -t argument");
1491 		return 1;
1492 	}
1493 
1494 	if (num_queues < 0) {
1495 		pr_err("Missing -q argument");
1496 		return 1;
1497 	}
1498 
1499 	if (!server_ip) {
1500 		pr_err("Missing -s argument");
1501 		return 1;
1502 	}
1503 
1504 	if (!port) {
1505 		pr_err("Missing -p argument");
1506 		return 1;
1507 	}
1508 
1509 	mem = provider->alloc(getpagesize() * NUM_PAGES);
1510 	if (!mem) {
1511 		pr_err("Failed to allocate memory buffer");
1512 		return 1;
1513 	}
1514 
1515 	ret = is_server ? do_server(mem) : do_client(mem);
1516 	if (ret)
1517 		goto err_free_mem;
1518 
1519 	err = 0;
1520 
1521 err_free_mem:
1522 	provider->free(mem);
1523 	return err;
1524 }
1525