xref: /linux/tools/testing/vsock/vsock_test.c (revision 0a80e38d0fe1fe7b59c1e93ad908c4148a15926a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * vsock_test - vsock.ko test suite
4  *
5  * Copyright (C) 2017 Red Hat, Inc.
6  *
7  * Author: Stefan Hajnoczi <stefanha@redhat.com>
8  */
9 
10 #include <getopt.h>
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <errno.h>
15 #include <unistd.h>
16 #include <linux/kernel.h>
17 #include <sys/types.h>
18 #include <sys/socket.h>
19 #include <time.h>
20 #include <sys/mman.h>
21 #include <poll.h>
22 #include <signal.h>
23 #include <sys/ioctl.h>
24 #include <linux/time64.h>
25 #include <pthread.h>
26 #include <fcntl.h>
27 #include <linux/sockios.h>
28 
29 #include "vsock_test_zerocopy.h"
30 #include "timeout.h"
31 #include "control.h"
32 #include "util.h"
33 
34 /* Basic messages for control_writeulong(), control_readulong() */
35 #define CONTROL_CONTINUE	1
36 #define CONTROL_DONE		0
37 
test_stream_connection_reset(const struct test_opts * opts)38 static void test_stream_connection_reset(const struct test_opts *opts)
39 {
40 	union {
41 		struct sockaddr sa;
42 		struct sockaddr_vm svm;
43 	} addr = {
44 		.svm = {
45 			.svm_family = AF_VSOCK,
46 			.svm_port = opts->peer_port,
47 			.svm_cid = opts->peer_cid,
48 		},
49 	};
50 	int ret;
51 	int fd;
52 
53 	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
54 
55 	timeout_begin(TIMEOUT);
56 	do {
57 		ret = connect(fd, &addr.sa, sizeof(addr.svm));
58 		timeout_check("connect");
59 	} while (ret < 0 && errno == EINTR);
60 	timeout_end();
61 
62 	if (ret != -1) {
63 		fprintf(stderr, "expected connect(2) failure, got %d\n", ret);
64 		exit(EXIT_FAILURE);
65 	}
66 	if (errno != ECONNRESET) {
67 		fprintf(stderr, "unexpected connect(2) errno %d\n", errno);
68 		exit(EXIT_FAILURE);
69 	}
70 
71 	close(fd);
72 }
73 
test_stream_bind_only_client(const struct test_opts * opts)74 static void test_stream_bind_only_client(const struct test_opts *opts)
75 {
76 	union {
77 		struct sockaddr sa;
78 		struct sockaddr_vm svm;
79 	} addr = {
80 		.svm = {
81 			.svm_family = AF_VSOCK,
82 			.svm_port = opts->peer_port,
83 			.svm_cid = opts->peer_cid,
84 		},
85 	};
86 	int ret;
87 	int fd;
88 
89 	/* Wait for the server to be ready */
90 	control_expectln("BIND");
91 
92 	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
93 
94 	timeout_begin(TIMEOUT);
95 	do {
96 		ret = connect(fd, &addr.sa, sizeof(addr.svm));
97 		timeout_check("connect");
98 	} while (ret < 0 && errno == EINTR);
99 	timeout_end();
100 
101 	if (ret != -1) {
102 		fprintf(stderr, "expected connect(2) failure, got %d\n", ret);
103 		exit(EXIT_FAILURE);
104 	}
105 	if (errno != ECONNRESET) {
106 		fprintf(stderr, "unexpected connect(2) errno %d\n", errno);
107 		exit(EXIT_FAILURE);
108 	}
109 
110 	/* Notify the server that the client has finished */
111 	control_writeln("DONE");
112 
113 	close(fd);
114 }
115 
test_stream_bind_only_server(const struct test_opts * opts)116 static void test_stream_bind_only_server(const struct test_opts *opts)
117 {
118 	int fd;
119 
120 	fd = vsock_bind(VMADDR_CID_ANY, opts->peer_port, SOCK_STREAM);
121 
122 	/* Notify the client that the server is ready */
123 	control_writeln("BIND");
124 
125 	/* Wait for the client to finish */
126 	control_expectln("DONE");
127 
128 	close(fd);
129 }
130 
test_stream_client_close_client(const struct test_opts * opts)131 static void test_stream_client_close_client(const struct test_opts *opts)
132 {
133 	int fd;
134 
135 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
136 	if (fd < 0) {
137 		perror("connect");
138 		exit(EXIT_FAILURE);
139 	}
140 
141 	send_byte(fd, 1, 0);
142 	close(fd);
143 }
144 
test_stream_client_close_server(const struct test_opts * opts)145 static void test_stream_client_close_server(const struct test_opts *opts)
146 {
147 	int fd;
148 
149 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
150 	if (fd < 0) {
151 		perror("accept");
152 		exit(EXIT_FAILURE);
153 	}
154 
155 	/* Wait for the remote to close the connection, before check
156 	 * -EPIPE error on send.
157 	 */
158 	vsock_wait_remote_close(fd);
159 
160 	send_byte(fd, -EPIPE, 0);
161 	recv_byte(fd, 1, 0);
162 	recv_byte(fd, 0, 0);
163 	close(fd);
164 }
165 
test_stream_server_close_client(const struct test_opts * opts)166 static void test_stream_server_close_client(const struct test_opts *opts)
167 {
168 	int fd;
169 
170 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
171 	if (fd < 0) {
172 		perror("connect");
173 		exit(EXIT_FAILURE);
174 	}
175 
176 	/* Wait for the remote to close the connection, before check
177 	 * -EPIPE error on send.
178 	 */
179 	vsock_wait_remote_close(fd);
180 
181 	send_byte(fd, -EPIPE, 0);
182 	recv_byte(fd, 1, 0);
183 	recv_byte(fd, 0, 0);
184 	close(fd);
185 }
186 
test_stream_server_close_server(const struct test_opts * opts)187 static void test_stream_server_close_server(const struct test_opts *opts)
188 {
189 	int fd;
190 
191 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
192 	if (fd < 0) {
193 		perror("accept");
194 		exit(EXIT_FAILURE);
195 	}
196 
197 	send_byte(fd, 1, 0);
198 	close(fd);
199 }
200 
201 /* With the standard socket sizes, VMCI is able to support about 100
202  * concurrent stream connections.
203  */
204 #define MULTICONN_NFDS 100
205 
test_stream_multiconn_client(const struct test_opts * opts)206 static void test_stream_multiconn_client(const struct test_opts *opts)
207 {
208 	int fds[MULTICONN_NFDS];
209 	int i;
210 
211 	for (i = 0; i < MULTICONN_NFDS; i++) {
212 		fds[i] = vsock_stream_connect(opts->peer_cid, opts->peer_port);
213 		if (fds[i] < 0) {
214 			perror("connect");
215 			exit(EXIT_FAILURE);
216 		}
217 	}
218 
219 	for (i = 0; i < MULTICONN_NFDS; i++) {
220 		if (i % 2)
221 			recv_byte(fds[i], 1, 0);
222 		else
223 			send_byte(fds[i], 1, 0);
224 	}
225 
226 	for (i = 0; i < MULTICONN_NFDS; i++)
227 		close(fds[i]);
228 }
229 
test_stream_multiconn_server(const struct test_opts * opts)230 static void test_stream_multiconn_server(const struct test_opts *opts)
231 {
232 	int fds[MULTICONN_NFDS];
233 	int i;
234 
235 	for (i = 0; i < MULTICONN_NFDS; i++) {
236 		fds[i] = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
237 		if (fds[i] < 0) {
238 			perror("accept");
239 			exit(EXIT_FAILURE);
240 		}
241 	}
242 
243 	for (i = 0; i < MULTICONN_NFDS; i++) {
244 		if (i % 2)
245 			send_byte(fds[i], 1, 0);
246 		else
247 			recv_byte(fds[i], 1, 0);
248 	}
249 
250 	for (i = 0; i < MULTICONN_NFDS; i++)
251 		close(fds[i]);
252 }
253 
254 #define MSG_PEEK_BUF_LEN 64
255 
test_msg_peek_client(const struct test_opts * opts,bool seqpacket)256 static void test_msg_peek_client(const struct test_opts *opts,
257 				 bool seqpacket)
258 {
259 	unsigned char buf[MSG_PEEK_BUF_LEN];
260 	int fd;
261 	int i;
262 
263 	if (seqpacket)
264 		fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
265 	else
266 		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
267 
268 	if (fd < 0) {
269 		perror("connect");
270 		exit(EXIT_FAILURE);
271 	}
272 
273 	for (i = 0; i < sizeof(buf); i++)
274 		buf[i] = rand() & 0xFF;
275 
276 	control_expectln("SRVREADY");
277 
278 	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
279 
280 	close(fd);
281 }
282 
test_msg_peek_server(const struct test_opts * opts,bool seqpacket)283 static void test_msg_peek_server(const struct test_opts *opts,
284 				 bool seqpacket)
285 {
286 	unsigned char buf_half[MSG_PEEK_BUF_LEN / 2];
287 	unsigned char buf_normal[MSG_PEEK_BUF_LEN];
288 	unsigned char buf_peek[MSG_PEEK_BUF_LEN];
289 	int fd;
290 
291 	if (seqpacket)
292 		fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
293 	else
294 		fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
295 
296 	if (fd < 0) {
297 		perror("accept");
298 		exit(EXIT_FAILURE);
299 	}
300 
301 	/* Peek from empty socket. */
302 	recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK | MSG_DONTWAIT,
303 		 -EAGAIN);
304 
305 	control_writeln("SRVREADY");
306 
307 	/* Peek part of data. */
308 	recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK, sizeof(buf_half));
309 
310 	/* Peek whole data. */
311 	recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK, sizeof(buf_peek));
312 
313 	/* Compare partial and full peek. */
314 	if (memcmp(buf_half, buf_peek, sizeof(buf_half))) {
315 		fprintf(stderr, "Partial peek data mismatch\n");
316 		exit(EXIT_FAILURE);
317 	}
318 
319 	if (seqpacket) {
320 		/* This type of socket supports MSG_TRUNC flag,
321 		 * so check it with MSG_PEEK. We must get length
322 		 * of the message.
323 		 */
324 		recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK | MSG_TRUNC,
325 			 sizeof(buf_peek));
326 	}
327 
328 	recv_buf(fd, buf_normal, sizeof(buf_normal), 0, sizeof(buf_normal));
329 
330 	/* Compare full peek and normal read. */
331 	if (memcmp(buf_peek, buf_normal, sizeof(buf_peek))) {
332 		fprintf(stderr, "Full peek data mismatch\n");
333 		exit(EXIT_FAILURE);
334 	}
335 
336 	close(fd);
337 }
338 
test_stream_msg_peek_client(const struct test_opts * opts)339 static void test_stream_msg_peek_client(const struct test_opts *opts)
340 {
341 	return test_msg_peek_client(opts, false);
342 }
343 
test_stream_msg_peek_server(const struct test_opts * opts)344 static void test_stream_msg_peek_server(const struct test_opts *opts)
345 {
346 	return test_msg_peek_server(opts, false);
347 }
348 
349 #define SOCK_BUF_SIZE (2 * 1024 * 1024)
350 #define SOCK_BUF_SIZE_SMALL (64 * 1024)
351 #define MAX_MSG_PAGES 4
352 
test_seqpacket_msg_bounds_client(const struct test_opts * opts)353 static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
354 {
355 	unsigned long long sock_buf_size;
356 	unsigned long curr_hash;
357 	size_t max_msg_size;
358 	int page_size;
359 	int msg_count;
360 	int fd;
361 
362 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
363 	if (fd < 0) {
364 		perror("connect");
365 		exit(EXIT_FAILURE);
366 	}
367 
368 	sock_buf_size = SOCK_BUF_SIZE;
369 
370 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
371 			     sock_buf_size,
372 			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
373 
374 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
375 			     sock_buf_size,
376 			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
377 
378 	/* Wait, until receiver sets buffer size. */
379 	control_expectln("SRVREADY");
380 
381 	curr_hash = 0;
382 	page_size = getpagesize();
383 	max_msg_size = MAX_MSG_PAGES * page_size;
384 	msg_count = SOCK_BUF_SIZE / max_msg_size;
385 
386 	for (int i = 0; i < msg_count; i++) {
387 		size_t buf_size;
388 		int flags;
389 		void *buf;
390 
391 		/* Use "small" buffers and "big" buffers. */
392 		if (i & 1)
393 			buf_size = page_size +
394 					(rand() % (max_msg_size - page_size));
395 		else
396 			buf_size = 1 + (rand() % page_size);
397 
398 		buf = malloc(buf_size);
399 
400 		if (!buf) {
401 			perror("malloc");
402 			exit(EXIT_FAILURE);
403 		}
404 
405 		memset(buf, rand() & 0xff, buf_size);
406 		/* Set at least one MSG_EOR + some random. */
407 		if (i == (msg_count / 2) || (rand() & 1)) {
408 			flags = MSG_EOR;
409 			curr_hash++;
410 		} else {
411 			flags = 0;
412 		}
413 
414 		send_buf(fd, buf, buf_size, flags, buf_size);
415 
416 		/*
417 		 * Hash sum is computed at both client and server in
418 		 * the same way:
419 		 * H += hash('message data')
420 		 * Such hash "controls" both data integrity and message
421 		 * bounds. After data exchange, both sums are compared
422 		 * using control socket, and if message bounds wasn't
423 		 * broken - two values must be equal.
424 		 */
425 		curr_hash += hash_djb2(buf, buf_size);
426 		free(buf);
427 	}
428 
429 	control_writeln("SENDDONE");
430 	control_writeulong(curr_hash);
431 	close(fd);
432 }
433 
test_seqpacket_msg_bounds_server(const struct test_opts * opts)434 static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
435 {
436 	unsigned long long sock_buf_size;
437 	unsigned long remote_hash;
438 	unsigned long curr_hash;
439 	int fd;
440 	struct msghdr msg = {0};
441 	struct iovec iov = {0};
442 
443 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
444 	if (fd < 0) {
445 		perror("accept");
446 		exit(EXIT_FAILURE);
447 	}
448 
449 	sock_buf_size = SOCK_BUF_SIZE;
450 
451 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
452 			     sock_buf_size,
453 			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
454 
455 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
456 			     sock_buf_size,
457 			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
458 
459 	/* Ready to receive data. */
460 	control_writeln("SRVREADY");
461 	/* Wait, until peer sends whole data. */
462 	control_expectln("SENDDONE");
463 	iov.iov_len = MAX_MSG_PAGES * getpagesize();
464 	iov.iov_base = malloc(iov.iov_len);
465 	if (!iov.iov_base) {
466 		perror("malloc");
467 		exit(EXIT_FAILURE);
468 	}
469 
470 	msg.msg_iov = &iov;
471 	msg.msg_iovlen = 1;
472 
473 	curr_hash = 0;
474 
475 	while (1) {
476 		ssize_t recv_size;
477 
478 		recv_size = recvmsg(fd, &msg, 0);
479 
480 		if (!recv_size)
481 			break;
482 
483 		if (recv_size < 0) {
484 			perror("recvmsg");
485 			exit(EXIT_FAILURE);
486 		}
487 
488 		if (msg.msg_flags & MSG_EOR)
489 			curr_hash++;
490 
491 		curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size);
492 	}
493 
494 	free(iov.iov_base);
495 	close(fd);
496 	remote_hash = control_readulong();
497 
498 	if (curr_hash != remote_hash) {
499 		fprintf(stderr, "Message bounds broken\n");
500 		exit(EXIT_FAILURE);
501 	}
502 }
503 
504 #define MESSAGE_TRUNC_SZ 32
test_seqpacket_msg_trunc_client(const struct test_opts * opts)505 static void test_seqpacket_msg_trunc_client(const struct test_opts *opts)
506 {
507 	int fd;
508 	char buf[MESSAGE_TRUNC_SZ];
509 
510 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
511 	if (fd < 0) {
512 		perror("connect");
513 		exit(EXIT_FAILURE);
514 	}
515 
516 	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
517 
518 	control_writeln("SENDDONE");
519 	close(fd);
520 }
521 
test_seqpacket_msg_trunc_server(const struct test_opts * opts)522 static void test_seqpacket_msg_trunc_server(const struct test_opts *opts)
523 {
524 	int fd;
525 	char buf[MESSAGE_TRUNC_SZ / 2];
526 	struct msghdr msg = {0};
527 	struct iovec iov = {0};
528 
529 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
530 	if (fd < 0) {
531 		perror("accept");
532 		exit(EXIT_FAILURE);
533 	}
534 
535 	control_expectln("SENDDONE");
536 	iov.iov_base = buf;
537 	iov.iov_len = sizeof(buf);
538 	msg.msg_iov = &iov;
539 	msg.msg_iovlen = 1;
540 
541 	ssize_t ret = recvmsg(fd, &msg, MSG_TRUNC);
542 
543 	if (ret != MESSAGE_TRUNC_SZ) {
544 		printf("%zi\n", ret);
545 		perror("MSG_TRUNC doesn't work");
546 		exit(EXIT_FAILURE);
547 	}
548 
549 	if (!(msg.msg_flags & MSG_TRUNC)) {
550 		fprintf(stderr, "MSG_TRUNC expected\n");
551 		exit(EXIT_FAILURE);
552 	}
553 
554 	close(fd);
555 }
556 
current_nsec(void)557 static time_t current_nsec(void)
558 {
559 	struct timespec ts;
560 
561 	if (clock_gettime(CLOCK_REALTIME, &ts)) {
562 		perror("clock_gettime(3) failed");
563 		exit(EXIT_FAILURE);
564 	}
565 
566 	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
567 }
568 
569 #define RCVTIMEO_TIMEOUT_SEC 1
570 #define READ_OVERHEAD_NSEC 250000000 /* 0.25 sec */
571 
test_seqpacket_timeout_client(const struct test_opts * opts)572 static void test_seqpacket_timeout_client(const struct test_opts *opts)
573 {
574 	int fd;
575 	struct timeval tv;
576 	char dummy;
577 	time_t read_enter_ns;
578 	time_t read_overhead_ns;
579 
580 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
581 	if (fd < 0) {
582 		perror("connect");
583 		exit(EXIT_FAILURE);
584 	}
585 
586 	tv.tv_sec = RCVTIMEO_TIMEOUT_SEC;
587 	tv.tv_usec = 0;
588 
589 	setsockopt_timeval_check(fd, SOL_SOCKET, SO_RCVTIMEO, tv,
590 				 "setsockopt(SO_RCVTIMEO)");
591 
592 	read_enter_ns = current_nsec();
593 
594 	if (read(fd, &dummy, sizeof(dummy)) != -1) {
595 		fprintf(stderr,
596 			"expected 'dummy' read(2) failure\n");
597 		exit(EXIT_FAILURE);
598 	}
599 
600 	if (errno != EAGAIN) {
601 		perror("EAGAIN expected");
602 		exit(EXIT_FAILURE);
603 	}
604 
605 	read_overhead_ns = current_nsec() - read_enter_ns -
606 			   NSEC_PER_SEC * RCVTIMEO_TIMEOUT_SEC;
607 
608 	if (read_overhead_ns > READ_OVERHEAD_NSEC) {
609 		fprintf(stderr,
610 			"too much time in read(2), %lu > %i ns\n",
611 			read_overhead_ns, READ_OVERHEAD_NSEC);
612 		exit(EXIT_FAILURE);
613 	}
614 
615 	control_writeln("WAITDONE");
616 	close(fd);
617 }
618 
test_seqpacket_timeout_server(const struct test_opts * opts)619 static void test_seqpacket_timeout_server(const struct test_opts *opts)
620 {
621 	int fd;
622 
623 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
624 	if (fd < 0) {
625 		perror("accept");
626 		exit(EXIT_FAILURE);
627 	}
628 
629 	control_expectln("WAITDONE");
630 	close(fd);
631 }
632 
test_seqpacket_bigmsg_client(const struct test_opts * opts)633 static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
634 {
635 	unsigned long long sock_buf_size;
636 	size_t buf_size;
637 	socklen_t len;
638 	void *data;
639 	int fd;
640 
641 	len = sizeof(sock_buf_size);
642 
643 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
644 	if (fd < 0) {
645 		perror("connect");
646 		exit(EXIT_FAILURE);
647 	}
648 
649 	if (getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
650 		       &sock_buf_size, &len)) {
651 		perror("getsockopt");
652 		exit(EXIT_FAILURE);
653 	}
654 
655 	sock_buf_size++;
656 
657 	/* size_t can be < unsigned long long */
658 	buf_size = (size_t)sock_buf_size;
659 	if (buf_size != sock_buf_size) {
660 		fprintf(stderr, "Returned BUFFER_SIZE too large\n");
661 		exit(EXIT_FAILURE);
662 	}
663 
664 	data = malloc(buf_size);
665 	if (!data) {
666 		perror("malloc");
667 		exit(EXIT_FAILURE);
668 	}
669 
670 	send_buf(fd, data, buf_size, 0, -EMSGSIZE);
671 
672 	control_writeln("CLISENT");
673 
674 	free(data);
675 	close(fd);
676 }
677 
test_seqpacket_bigmsg_server(const struct test_opts * opts)678 static void test_seqpacket_bigmsg_server(const struct test_opts *opts)
679 {
680 	int fd;
681 
682 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
683 	if (fd < 0) {
684 		perror("accept");
685 		exit(EXIT_FAILURE);
686 	}
687 
688 	control_expectln("CLISENT");
689 
690 	close(fd);
691 }
692 
693 #define BUF_PATTERN_1 'a'
694 #define BUF_PATTERN_2 'b'
695 
test_seqpacket_invalid_rec_buffer_client(const struct test_opts * opts)696 static void test_seqpacket_invalid_rec_buffer_client(const struct test_opts *opts)
697 {
698 	int fd;
699 	unsigned char *buf1;
700 	unsigned char *buf2;
701 	int buf_size = getpagesize() * 3;
702 
703 	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
704 	if (fd < 0) {
705 		perror("connect");
706 		exit(EXIT_FAILURE);
707 	}
708 
709 	buf1 = malloc(buf_size);
710 	if (!buf1) {
711 		perror("'malloc()' for 'buf1'");
712 		exit(EXIT_FAILURE);
713 	}
714 
715 	buf2 = malloc(buf_size);
716 	if (!buf2) {
717 		perror("'malloc()' for 'buf2'");
718 		exit(EXIT_FAILURE);
719 	}
720 
721 	memset(buf1, BUF_PATTERN_1, buf_size);
722 	memset(buf2, BUF_PATTERN_2, buf_size);
723 
724 	send_buf(fd, buf1, buf_size, 0, buf_size);
725 
726 	send_buf(fd, buf2, buf_size, 0, buf_size);
727 
728 	close(fd);
729 }
730 
test_seqpacket_invalid_rec_buffer_server(const struct test_opts * opts)731 static void test_seqpacket_invalid_rec_buffer_server(const struct test_opts *opts)
732 {
733 	int fd;
734 	unsigned char *broken_buf;
735 	unsigned char *valid_buf;
736 	int page_size = getpagesize();
737 	int buf_size = page_size * 3;
738 	ssize_t res;
739 	int prot = PROT_READ | PROT_WRITE;
740 	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
741 	int i;
742 
743 	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
744 	if (fd < 0) {
745 		perror("accept");
746 		exit(EXIT_FAILURE);
747 	}
748 
749 	/* Setup first buffer. */
750 	broken_buf = mmap(NULL, buf_size, prot, flags, -1, 0);
751 	if (broken_buf == MAP_FAILED) {
752 		perror("mmap for 'broken_buf'");
753 		exit(EXIT_FAILURE);
754 	}
755 
756 	/* Unmap "hole" in buffer. */
757 	if (munmap(broken_buf + page_size, page_size)) {
758 		perror("'broken_buf' setup");
759 		exit(EXIT_FAILURE);
760 	}
761 
762 	valid_buf = mmap(NULL, buf_size, prot, flags, -1, 0);
763 	if (valid_buf == MAP_FAILED) {
764 		perror("mmap for 'valid_buf'");
765 		exit(EXIT_FAILURE);
766 	}
767 
768 	/* Try to fill buffer with unmapped middle. */
769 	res = read(fd, broken_buf, buf_size);
770 	if (res != -1) {
771 		fprintf(stderr,
772 			"expected 'broken_buf' read(2) failure, got %zi\n",
773 			res);
774 		exit(EXIT_FAILURE);
775 	}
776 
777 	if (errno != EFAULT) {
778 		perror("unexpected errno of 'broken_buf'");
779 		exit(EXIT_FAILURE);
780 	}
781 
782 	/* Try to fill valid buffer. */
783 	res = read(fd, valid_buf, buf_size);
784 	if (res < 0) {
785 		perror("unexpected 'valid_buf' read(2) failure");
786 		exit(EXIT_FAILURE);
787 	}
788 
789 	if (res != buf_size) {
790 		fprintf(stderr,
791 			"invalid 'valid_buf' read(2), expected %i, got %zi\n",
792 			buf_size, res);
793 		exit(EXIT_FAILURE);
794 	}
795 
796 	for (i = 0; i < buf_size; i++) {
797 		if (valid_buf[i] != BUF_PATTERN_2) {
798 			fprintf(stderr,
799 				"invalid pattern for 'valid_buf' at %i, expected %hhX, got %hhX\n",
800 				i, BUF_PATTERN_2, valid_buf[i]);
801 			exit(EXIT_FAILURE);
802 		}
803 	}
804 
805 	/* Unmap buffers. */
806 	munmap(broken_buf, page_size);
807 	munmap(broken_buf + page_size * 2, page_size);
808 	munmap(valid_buf, buf_size);
809 	close(fd);
810 }
811 
812 #define RCVLOWAT_BUF_SIZE 128
813 
test_stream_poll_rcvlowat_server(const struct test_opts * opts)814 static void test_stream_poll_rcvlowat_server(const struct test_opts *opts)
815 {
816 	int fd;
817 	int i;
818 
819 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
820 	if (fd < 0) {
821 		perror("accept");
822 		exit(EXIT_FAILURE);
823 	}
824 
825 	/* Send 1 byte. */
826 	send_byte(fd, 1, 0);
827 
828 	control_writeln("SRVSENT");
829 
830 	/* Wait until client is ready to receive rest of data. */
831 	control_expectln("CLNSENT");
832 
833 	for (i = 0; i < RCVLOWAT_BUF_SIZE - 1; i++)
834 		send_byte(fd, 1, 0);
835 
836 	/* Keep socket in active state. */
837 	control_expectln("POLLDONE");
838 
839 	close(fd);
840 }
841 
test_stream_poll_rcvlowat_client(const struct test_opts * opts)842 static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
843 {
844 	int lowat_val = RCVLOWAT_BUF_SIZE;
845 	char buf[RCVLOWAT_BUF_SIZE];
846 	struct pollfd fds;
847 	short poll_flags;
848 	int fd;
849 
850 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
851 	if (fd < 0) {
852 		perror("connect");
853 		exit(EXIT_FAILURE);
854 	}
855 
856 	setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
857 			     lowat_val, "setsockopt(SO_RCVLOWAT)");
858 
859 	control_expectln("SRVSENT");
860 
861 	/* At this point, server sent 1 byte. */
862 	fds.fd = fd;
863 	poll_flags = POLLIN | POLLRDNORM;
864 	fds.events = poll_flags;
865 
866 	/* Try to wait for 1 sec. */
867 	if (poll(&fds, 1, 1000) < 0) {
868 		perror("poll");
869 		exit(EXIT_FAILURE);
870 	}
871 
872 	/* poll() must return nothing. */
873 	if (fds.revents) {
874 		fprintf(stderr, "Unexpected poll result %hx\n",
875 			fds.revents);
876 		exit(EXIT_FAILURE);
877 	}
878 
879 	/* Tell server to send rest of data. */
880 	control_writeln("CLNSENT");
881 
882 	/* Poll for data. */
883 	if (poll(&fds, 1, 10000) < 0) {
884 		perror("poll");
885 		exit(EXIT_FAILURE);
886 	}
887 
888 	/* Only these two bits are expected. */
889 	if (fds.revents != poll_flags) {
890 		fprintf(stderr, "Unexpected poll result %hx\n",
891 			fds.revents);
892 		exit(EXIT_FAILURE);
893 	}
894 
895 	/* Use MSG_DONTWAIT, if call is going to wait, EAGAIN
896 	 * will be returned.
897 	 */
898 	recv_buf(fd, buf, sizeof(buf), MSG_DONTWAIT, RCVLOWAT_BUF_SIZE);
899 
900 	control_writeln("POLLDONE");
901 
902 	close(fd);
903 }
904 
905 #define INV_BUF_TEST_DATA_LEN 512
906 
test_inv_buf_client(const struct test_opts * opts,bool stream)907 static void test_inv_buf_client(const struct test_opts *opts, bool stream)
908 {
909 	unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
910 	ssize_t expected_ret;
911 	int fd;
912 
913 	if (stream)
914 		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
915 	else
916 		fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
917 
918 	if (fd < 0) {
919 		perror("connect");
920 		exit(EXIT_FAILURE);
921 	}
922 
923 	control_expectln("SENDDONE");
924 
925 	/* Use invalid buffer here. */
926 	recv_buf(fd, NULL, sizeof(data), 0, -EFAULT);
927 
928 	if (stream) {
929 		/* For SOCK_STREAM we must continue reading. */
930 		expected_ret = sizeof(data);
931 	} else {
932 		/* For SOCK_SEQPACKET socket's queue must be empty. */
933 		expected_ret = -EAGAIN;
934 	}
935 
936 	recv_buf(fd, data, sizeof(data), MSG_DONTWAIT, expected_ret);
937 
938 	control_writeln("DONE");
939 
940 	close(fd);
941 }
942 
test_inv_buf_server(const struct test_opts * opts,bool stream)943 static void test_inv_buf_server(const struct test_opts *opts, bool stream)
944 {
945 	unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
946 	int fd;
947 
948 	if (stream)
949 		fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
950 	else
951 		fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
952 
953 	if (fd < 0) {
954 		perror("accept");
955 		exit(EXIT_FAILURE);
956 	}
957 
958 	send_buf(fd, data, sizeof(data), 0, sizeof(data));
959 
960 	control_writeln("SENDDONE");
961 
962 	control_expectln("DONE");
963 
964 	close(fd);
965 }
966 
test_stream_inv_buf_client(const struct test_opts * opts)967 static void test_stream_inv_buf_client(const struct test_opts *opts)
968 {
969 	test_inv_buf_client(opts, true);
970 }
971 
test_stream_inv_buf_server(const struct test_opts * opts)972 static void test_stream_inv_buf_server(const struct test_opts *opts)
973 {
974 	test_inv_buf_server(opts, true);
975 }
976 
test_seqpacket_inv_buf_client(const struct test_opts * opts)977 static void test_seqpacket_inv_buf_client(const struct test_opts *opts)
978 {
979 	test_inv_buf_client(opts, false);
980 }
981 
test_seqpacket_inv_buf_server(const struct test_opts * opts)982 static void test_seqpacket_inv_buf_server(const struct test_opts *opts)
983 {
984 	test_inv_buf_server(opts, false);
985 }
986 
987 #define HELLO_STR "HELLO"
988 #define WORLD_STR "WORLD"
989 
test_stream_virtio_skb_merge_client(const struct test_opts * opts)990 static void test_stream_virtio_skb_merge_client(const struct test_opts *opts)
991 {
992 	int fd;
993 
994 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
995 	if (fd < 0) {
996 		perror("connect");
997 		exit(EXIT_FAILURE);
998 	}
999 
1000 	/* Send first skbuff. */
1001 	send_buf(fd, HELLO_STR, strlen(HELLO_STR), 0, strlen(HELLO_STR));
1002 
1003 	control_writeln("SEND0");
1004 	/* Peer reads part of first skbuff. */
1005 	control_expectln("REPLY0");
1006 
1007 	/* Send second skbuff, it will be appended to the first. */
1008 	send_buf(fd, WORLD_STR, strlen(WORLD_STR), 0, strlen(WORLD_STR));
1009 
1010 	control_writeln("SEND1");
1011 	/* Peer reads merged skbuff packet. */
1012 	control_expectln("REPLY1");
1013 
1014 	close(fd);
1015 }
1016 
test_stream_virtio_skb_merge_server(const struct test_opts * opts)1017 static void test_stream_virtio_skb_merge_server(const struct test_opts *opts)
1018 {
1019 	size_t read = 0, to_read;
1020 	unsigned char buf[64];
1021 	int fd;
1022 
1023 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1024 	if (fd < 0) {
1025 		perror("accept");
1026 		exit(EXIT_FAILURE);
1027 	}
1028 
1029 	control_expectln("SEND0");
1030 
1031 	/* Read skbuff partially. */
1032 	to_read = 2;
1033 	recv_buf(fd, buf + read, to_read, 0, to_read);
1034 	read += to_read;
1035 
1036 	control_writeln("REPLY0");
1037 	control_expectln("SEND1");
1038 
1039 	/* Read the rest of both buffers */
1040 	to_read = strlen(HELLO_STR WORLD_STR) - read;
1041 	recv_buf(fd, buf + read, to_read, 0, to_read);
1042 	read += to_read;
1043 
1044 	/* No more bytes should be there */
1045 	to_read = sizeof(buf) - read;
1046 	recv_buf(fd, buf + read, to_read, MSG_DONTWAIT, -EAGAIN);
1047 
1048 	if (memcmp(buf, HELLO_STR WORLD_STR, strlen(HELLO_STR WORLD_STR))) {
1049 		fprintf(stderr, "pattern mismatch\n");
1050 		exit(EXIT_FAILURE);
1051 	}
1052 
1053 	control_writeln("REPLY1");
1054 
1055 	close(fd);
1056 }
1057 
test_seqpacket_msg_peek_client(const struct test_opts * opts)1058 static void test_seqpacket_msg_peek_client(const struct test_opts *opts)
1059 {
1060 	return test_msg_peek_client(opts, true);
1061 }
1062 
test_seqpacket_msg_peek_server(const struct test_opts * opts)1063 static void test_seqpacket_msg_peek_server(const struct test_opts *opts)
1064 {
1065 	return test_msg_peek_server(opts, true);
1066 }
1067 
1068 static sig_atomic_t have_sigpipe;
1069 
sigpipe(int signo)1070 static void sigpipe(int signo)
1071 {
1072 	have_sigpipe = 1;
1073 }
1074 
1075 #define SEND_SLEEP_USEC (10 * 1000)
1076 
test_stream_check_sigpipe(int fd)1077 static void test_stream_check_sigpipe(int fd)
1078 {
1079 	ssize_t res;
1080 
1081 	have_sigpipe = 0;
1082 
1083 	/* When the other peer calls shutdown(SHUT_RD), there is a chance that
1084 	 * the send() call could occur before the message carrying the close
1085 	 * information arrives over the transport. In such cases, the send()
1086 	 * might still succeed. To avoid this race, let's retry the send() call
1087 	 * a few times, ensuring the test is more reliable.
1088 	 */
1089 	timeout_begin(TIMEOUT);
1090 	while(1) {
1091 		res = send(fd, "A", 1, 0);
1092 		if (res == -1 && errno != EINTR)
1093 			break;
1094 
1095 		/* Sleep a little before trying again to avoid flooding the
1096 		 * other peer and filling its receive buffer, causing
1097 		 * false-negative.
1098 		 */
1099 		timeout_usleep(SEND_SLEEP_USEC);
1100 		timeout_check("send");
1101 	}
1102 	timeout_end();
1103 
1104 	if (errno != EPIPE) {
1105 		fprintf(stderr, "unexpected send(2) errno %d\n", errno);
1106 		exit(EXIT_FAILURE);
1107 	}
1108 	if (!have_sigpipe) {
1109 		fprintf(stderr, "SIGPIPE expected\n");
1110 		exit(EXIT_FAILURE);
1111 	}
1112 
1113 	have_sigpipe = 0;
1114 
1115 	timeout_begin(TIMEOUT);
1116 	while(1) {
1117 		res = send(fd, "A", 1, MSG_NOSIGNAL);
1118 		if (res == -1 && errno != EINTR)
1119 			break;
1120 
1121 		timeout_usleep(SEND_SLEEP_USEC);
1122 		timeout_check("send");
1123 	}
1124 	timeout_end();
1125 
1126 	if (errno != EPIPE) {
1127 		fprintf(stderr, "unexpected send(2) errno %d\n", errno);
1128 		exit(EXIT_FAILURE);
1129 	}
1130 	if (have_sigpipe) {
1131 		fprintf(stderr, "SIGPIPE not expected\n");
1132 		exit(EXIT_FAILURE);
1133 	}
1134 }
1135 
test_stream_shutwr_client(const struct test_opts * opts)1136 static void test_stream_shutwr_client(const struct test_opts *opts)
1137 {
1138 	int fd;
1139 
1140 	struct sigaction act = {
1141 		.sa_handler = sigpipe,
1142 	};
1143 
1144 	sigaction(SIGPIPE, &act, NULL);
1145 
1146 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1147 	if (fd < 0) {
1148 		perror("connect");
1149 		exit(EXIT_FAILURE);
1150 	}
1151 
1152 	if (shutdown(fd, SHUT_WR)) {
1153 		perror("shutdown");
1154 		exit(EXIT_FAILURE);
1155 	}
1156 
1157 	test_stream_check_sigpipe(fd);
1158 
1159 	control_writeln("CLIENTDONE");
1160 
1161 	close(fd);
1162 }
1163 
test_stream_shutwr_server(const struct test_opts * opts)1164 static void test_stream_shutwr_server(const struct test_opts *opts)
1165 {
1166 	int fd;
1167 
1168 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1169 	if (fd < 0) {
1170 		perror("accept");
1171 		exit(EXIT_FAILURE);
1172 	}
1173 
1174 	control_expectln("CLIENTDONE");
1175 
1176 	close(fd);
1177 }
1178 
test_stream_shutrd_client(const struct test_opts * opts)1179 static void test_stream_shutrd_client(const struct test_opts *opts)
1180 {
1181 	int fd;
1182 
1183 	struct sigaction act = {
1184 		.sa_handler = sigpipe,
1185 	};
1186 
1187 	sigaction(SIGPIPE, &act, NULL);
1188 
1189 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1190 	if (fd < 0) {
1191 		perror("connect");
1192 		exit(EXIT_FAILURE);
1193 	}
1194 
1195 	control_expectln("SHUTRDDONE");
1196 
1197 	test_stream_check_sigpipe(fd);
1198 
1199 	control_writeln("CLIENTDONE");
1200 
1201 	close(fd);
1202 }
1203 
test_stream_shutrd_server(const struct test_opts * opts)1204 static void test_stream_shutrd_server(const struct test_opts *opts)
1205 {
1206 	int fd;
1207 
1208 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1209 	if (fd < 0) {
1210 		perror("accept");
1211 		exit(EXIT_FAILURE);
1212 	}
1213 
1214 	if (shutdown(fd, SHUT_RD)) {
1215 		perror("shutdown");
1216 		exit(EXIT_FAILURE);
1217 	}
1218 
1219 	control_writeln("SHUTRDDONE");
1220 	control_expectln("CLIENTDONE");
1221 
1222 	close(fd);
1223 }
1224 
test_double_bind_connect_server(const struct test_opts * opts)1225 static void test_double_bind_connect_server(const struct test_opts *opts)
1226 {
1227 	int listen_fd, client_fd, i;
1228 	struct sockaddr_vm sa_client;
1229 	socklen_t socklen_client = sizeof(sa_client);
1230 
1231 	listen_fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);
1232 
1233 	for (i = 0; i < 2; i++) {
1234 		control_writeln("LISTENING");
1235 
1236 		timeout_begin(TIMEOUT);
1237 		do {
1238 			client_fd = accept(listen_fd, (struct sockaddr *)&sa_client,
1239 					   &socklen_client);
1240 			timeout_check("accept");
1241 		} while (client_fd < 0 && errno == EINTR);
1242 		timeout_end();
1243 
1244 		if (client_fd < 0) {
1245 			perror("accept");
1246 			exit(EXIT_FAILURE);
1247 		}
1248 
1249 		/* Waiting for remote peer to close connection */
1250 		vsock_wait_remote_close(client_fd);
1251 	}
1252 
1253 	close(listen_fd);
1254 }
1255 
test_double_bind_connect_client(const struct test_opts * opts)1256 static void test_double_bind_connect_client(const struct test_opts *opts)
1257 {
1258 	int i, client_fd;
1259 
1260 	for (i = 0; i < 2; i++) {
1261 		/* Wait until server is ready to accept a new connection */
1262 		control_expectln("LISTENING");
1263 
1264 		/* We use 'peer_port + 1' as "some" port for the 'bind()'
1265 		 * call. It is safe for overflow, but must be considered,
1266 		 * when running multiple test applications simultaneously
1267 		 * where 'peer-port' argument differs by 1.
1268 		 */
1269 		client_fd = vsock_bind_connect(opts->peer_cid, opts->peer_port,
1270 					       opts->peer_port + 1, SOCK_STREAM);
1271 
1272 		close(client_fd);
1273 	}
1274 }
1275 
1276 #define MSG_BUF_IOCTL_LEN 64
test_unsent_bytes_server(const struct test_opts * opts,int type)1277 static void test_unsent_bytes_server(const struct test_opts *opts, int type)
1278 {
1279 	unsigned char buf[MSG_BUF_IOCTL_LEN];
1280 	int client_fd;
1281 
1282 	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
1283 	if (client_fd < 0) {
1284 		perror("accept");
1285 		exit(EXIT_FAILURE);
1286 	}
1287 
1288 	recv_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
1289 	control_writeln("RECEIVED");
1290 
1291 	close(client_fd);
1292 }
1293 
test_unsent_bytes_client(const struct test_opts * opts,int type)1294 static void test_unsent_bytes_client(const struct test_opts *opts, int type)
1295 {
1296 	unsigned char buf[MSG_BUF_IOCTL_LEN];
1297 	int fd;
1298 
1299 	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
1300 	if (fd < 0) {
1301 		perror("connect");
1302 		exit(EXIT_FAILURE);
1303 	}
1304 
1305 	for (int i = 0; i < sizeof(buf); i++)
1306 		buf[i] = rand() & 0xFF;
1307 
1308 	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
1309 	control_expectln("RECEIVED");
1310 
1311 	/* SIOCOUTQ isn't guaranteed to instantly track sent data. Even though
1312 	 * the "RECEIVED" message means that the other side has received the
1313 	 * data, there can be a delay in our kernel before updating the "unsent
1314 	 * bytes" counter. vsock_wait_sent() will repeat SIOCOUTQ until it
1315 	 * returns 0.
1316 	 */
1317 	if (!vsock_wait_sent(fd))
1318 		fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
1319 
1320 	close(fd);
1321 }
1322 
test_unread_bytes_server(const struct test_opts * opts,int type)1323 static void test_unread_bytes_server(const struct test_opts *opts, int type)
1324 {
1325 	unsigned char buf[MSG_BUF_IOCTL_LEN];
1326 	int client_fd;
1327 
1328 	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
1329 	if (client_fd < 0) {
1330 		perror("accept");
1331 		exit(EXIT_FAILURE);
1332 	}
1333 
1334 	for (int i = 0; i < sizeof(buf); i++)
1335 		buf[i] = rand() & 0xFF;
1336 
1337 	send_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
1338 	control_writeln("SENT");
1339 
1340 	close(client_fd);
1341 }
1342 
test_unread_bytes_client(const struct test_opts * opts,int type)1343 static void test_unread_bytes_client(const struct test_opts *opts, int type)
1344 {
1345 	unsigned char buf[MSG_BUF_IOCTL_LEN];
1346 	int fd;
1347 
1348 	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
1349 	if (fd < 0) {
1350 		perror("connect");
1351 		exit(EXIT_FAILURE);
1352 	}
1353 
1354 	control_expectln("SENT");
1355 	/* The data has arrived but has not been read. The expected is
1356 	 * MSG_BUF_IOCTL_LEN.
1357 	 */
1358 	if (!vsock_ioctl_int(fd, SIOCINQ, MSG_BUF_IOCTL_LEN)) {
1359 		fprintf(stderr, "Test skipped, SIOCINQ not supported.\n");
1360 		goto out;
1361 	}
1362 
1363 	recv_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
1364 	/* All data has been consumed, so the expected is 0. */
1365 	vsock_ioctl_int(fd, SIOCINQ, 0);
1366 
1367 out:
1368 	close(fd);
1369 }
1370 
test_stream_unsent_bytes_client(const struct test_opts * opts)1371 static void test_stream_unsent_bytes_client(const struct test_opts *opts)
1372 {
1373 	test_unsent_bytes_client(opts, SOCK_STREAM);
1374 }
1375 
test_stream_unsent_bytes_server(const struct test_opts * opts)1376 static void test_stream_unsent_bytes_server(const struct test_opts *opts)
1377 {
1378 	test_unsent_bytes_server(opts, SOCK_STREAM);
1379 }
1380 
test_seqpacket_unsent_bytes_client(const struct test_opts * opts)1381 static void test_seqpacket_unsent_bytes_client(const struct test_opts *opts)
1382 {
1383 	test_unsent_bytes_client(opts, SOCK_SEQPACKET);
1384 }
1385 
test_seqpacket_unsent_bytes_server(const struct test_opts * opts)1386 static void test_seqpacket_unsent_bytes_server(const struct test_opts *opts)
1387 {
1388 	test_unsent_bytes_server(opts, SOCK_SEQPACKET);
1389 }
1390 
test_stream_unread_bytes_client(const struct test_opts * opts)1391 static void test_stream_unread_bytes_client(const struct test_opts *opts)
1392 {
1393 	test_unread_bytes_client(opts, SOCK_STREAM);
1394 }
1395 
test_stream_unread_bytes_server(const struct test_opts * opts)1396 static void test_stream_unread_bytes_server(const struct test_opts *opts)
1397 {
1398 	test_unread_bytes_server(opts, SOCK_STREAM);
1399 }
1400 
test_seqpacket_unread_bytes_client(const struct test_opts * opts)1401 static void test_seqpacket_unread_bytes_client(const struct test_opts *opts)
1402 {
1403 	test_unread_bytes_client(opts, SOCK_SEQPACKET);
1404 }
1405 
test_seqpacket_unread_bytes_server(const struct test_opts * opts)1406 static void test_seqpacket_unread_bytes_server(const struct test_opts *opts)
1407 {
1408 	test_unread_bytes_server(opts, SOCK_SEQPACKET);
1409 }
1410 
1411 #define RCVLOWAT_CREDIT_UPD_BUF_SIZE	(1024 * 128)
1412 /* This define is the same as in 'include/linux/virtio_vsock.h':
1413  * it is used to decide when to send credit update message during
1414  * reading from rx queue of a socket. Value and its usage in
1415  * kernel is important for this test.
1416  */
1417 #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE	(1024 * 64)
1418 
test_stream_rcvlowat_def_cred_upd_client(const struct test_opts * opts)1419 static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opts)
1420 {
1421 	size_t buf_size;
1422 	void *buf;
1423 	int fd;
1424 
1425 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1426 	if (fd < 0) {
1427 		perror("connect");
1428 		exit(EXIT_FAILURE);
1429 	}
1430 
1431 	/* Send 1 byte more than peer's buffer size. */
1432 	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE + 1;
1433 
1434 	buf = malloc(buf_size);
1435 	if (!buf) {
1436 		perror("malloc");
1437 		exit(EXIT_FAILURE);
1438 	}
1439 
1440 	/* Wait until peer sets needed buffer size. */
1441 	recv_byte(fd, 1, 0);
1442 
1443 	if (send(fd, buf, buf_size, 0) != buf_size) {
1444 		perror("send failed");
1445 		exit(EXIT_FAILURE);
1446 	}
1447 
1448 	free(buf);
1449 	close(fd);
1450 }
1451 
test_stream_credit_update_test(const struct test_opts * opts,bool low_rx_bytes_test)1452 static void test_stream_credit_update_test(const struct test_opts *opts,
1453 					   bool low_rx_bytes_test)
1454 {
1455 	int recv_buf_size;
1456 	struct pollfd fds;
1457 	size_t buf_size;
1458 	unsigned long long sock_buf_size;
1459 	void *buf;
1460 	int fd;
1461 
1462 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1463 	if (fd < 0) {
1464 		perror("accept");
1465 		exit(EXIT_FAILURE);
1466 	}
1467 
1468 	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE;
1469 
1470 	/* size_t can be < unsigned long long */
1471 	sock_buf_size = buf_size;
1472 
1473 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
1474 			     sock_buf_size,
1475 			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
1476 
1477 	if (low_rx_bytes_test) {
1478 		/* Set new SO_RCVLOWAT here. This enables sending credit
1479 		 * update when number of bytes if our rx queue become <
1480 		 * SO_RCVLOWAT value.
1481 		 */
1482 		recv_buf_size = 1 + VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
1483 
1484 		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
1485 				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
1486 	}
1487 
1488 	/* Send one dummy byte here, because 'setsockopt()' above also
1489 	 * sends special packet which tells sender to update our buffer
1490 	 * size. This 'send_byte()' will serialize such packet with data
1491 	 * reads in a loop below. Sender starts transmission only when
1492 	 * it receives this single byte.
1493 	 */
1494 	send_byte(fd, 1, 0);
1495 
1496 	buf = malloc(buf_size);
1497 	if (!buf) {
1498 		perror("malloc");
1499 		exit(EXIT_FAILURE);
1500 	}
1501 
1502 	/* Wait until there will be 128KB of data in rx queue. */
1503 	while (1) {
1504 		ssize_t res;
1505 
1506 		res = recv(fd, buf, buf_size, MSG_PEEK);
1507 		if (res == buf_size)
1508 			break;
1509 
1510 		if (res <= 0) {
1511 			fprintf(stderr, "unexpected 'recv()' return: %zi\n", res);
1512 			exit(EXIT_FAILURE);
1513 		}
1514 	}
1515 
1516 	/* There is 128KB of data in the socket's rx queue, dequeue first
1517 	 * 64KB, credit update is sent if 'low_rx_bytes_test' == true.
1518 	 * Otherwise, credit update is sent in 'if (!low_rx_bytes_test)'.
1519 	 */
1520 	recv_buf_size = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
1521 	recv_buf(fd, buf, recv_buf_size, 0, recv_buf_size);
1522 
1523 	if (!low_rx_bytes_test) {
1524 		recv_buf_size++;
1525 
1526 		/* Updating SO_RCVLOWAT will send credit update. */
1527 		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
1528 				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
1529 	}
1530 
1531 	fds.fd = fd;
1532 	fds.events = POLLIN | POLLRDNORM | POLLERR |
1533 		     POLLRDHUP | POLLHUP;
1534 
1535 	/* This 'poll()' will return once we receive last byte
1536 	 * sent by client.
1537 	 */
1538 	if (poll(&fds, 1, -1) < 0) {
1539 		perror("poll");
1540 		exit(EXIT_FAILURE);
1541 	}
1542 
1543 	if (fds.revents & POLLERR) {
1544 		fprintf(stderr, "'poll()' error\n");
1545 		exit(EXIT_FAILURE);
1546 	}
1547 
1548 	if (fds.revents & (POLLIN | POLLRDNORM)) {
1549 		recv_buf(fd, buf, recv_buf_size, MSG_DONTWAIT, recv_buf_size);
1550 	} else {
1551 		/* These flags must be set, as there is at
1552 		 * least 64KB of data ready to read.
1553 		 */
1554 		fprintf(stderr, "POLLIN | POLLRDNORM expected\n");
1555 		exit(EXIT_FAILURE);
1556 	}
1557 
1558 	free(buf);
1559 	close(fd);
1560 }
1561 
test_stream_cred_upd_on_low_rx_bytes(const struct test_opts * opts)1562 static void test_stream_cred_upd_on_low_rx_bytes(const struct test_opts *opts)
1563 {
1564 	test_stream_credit_update_test(opts, true);
1565 }
1566 
test_stream_cred_upd_on_set_rcvlowat(const struct test_opts * opts)1567 static void test_stream_cred_upd_on_set_rcvlowat(const struct test_opts *opts)
1568 {
1569 	test_stream_credit_update_test(opts, false);
1570 }
1571 
1572 /* The goal of test leak_acceptq is to stress the race between connect() and
1573  * close(listener). Implementation of client/server loops boils down to:
1574  *
1575  * client                server
1576  * ------                ------
1577  * write(CONTINUE)
1578  *                       expect(CONTINUE)
1579  *                       listen()
1580  *                       write(LISTENING)
1581  * expect(LISTENING)
1582  * connect()             close()
1583  */
1584 #define ACCEPTQ_LEAK_RACE_TIMEOUT 2 /* seconds */
1585 
test_stream_leak_acceptq_client(const struct test_opts * opts)1586 static void test_stream_leak_acceptq_client(const struct test_opts *opts)
1587 {
1588 	time_t tout;
1589 	int fd;
1590 
1591 	tout = current_nsec() + ACCEPTQ_LEAK_RACE_TIMEOUT * NSEC_PER_SEC;
1592 	do {
1593 		control_writeulong(CONTROL_CONTINUE);
1594 
1595 		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1596 		if (fd >= 0)
1597 			close(fd);
1598 	} while (current_nsec() < tout);
1599 
1600 	control_writeulong(CONTROL_DONE);
1601 }
1602 
1603 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */
test_stream_leak_acceptq_server(const struct test_opts * opts)1604 static void test_stream_leak_acceptq_server(const struct test_opts *opts)
1605 {
1606 	int fd;
1607 
1608 	while (control_readulong() == CONTROL_CONTINUE) {
1609 		fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);
1610 		control_writeln("LISTENING");
1611 		close(fd);
1612 	}
1613 }
1614 
1615 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */
test_stream_msgzcopy_leak_errq_client(const struct test_opts * opts)1616 static void test_stream_msgzcopy_leak_errq_client(const struct test_opts *opts)
1617 {
1618 	struct pollfd fds = { 0 };
1619 	int fd;
1620 
1621 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1622 	if (fd < 0) {
1623 		perror("connect");
1624 		exit(EXIT_FAILURE);
1625 	}
1626 
1627 	enable_so_zerocopy_check(fd);
1628 	send_byte(fd, 1, MSG_ZEROCOPY);
1629 
1630 	fds.fd = fd;
1631 	fds.events = 0;
1632 	if (poll(&fds, 1, -1) < 0) {
1633 		perror("poll");
1634 		exit(EXIT_FAILURE);
1635 	}
1636 
1637 	close(fd);
1638 }
1639 
test_stream_msgzcopy_leak_errq_server(const struct test_opts * opts)1640 static void test_stream_msgzcopy_leak_errq_server(const struct test_opts *opts)
1641 {
1642 	int fd;
1643 
1644 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1645 	if (fd < 0) {
1646 		perror("accept");
1647 		exit(EXIT_FAILURE);
1648 	}
1649 
1650 	recv_byte(fd, 1, 0);
1651 	vsock_wait_remote_close(fd);
1652 	close(fd);
1653 }
1654 
1655 /* Test msgzcopy_leak_zcskb is meant to exercise sendmsg() error handling path,
1656  * that might leak an skb. The idea is to fail virtio_transport_init_zcopy_skb()
1657  * by hitting net.core.optmem_max limit in sock_omalloc(), specifically
1658  *
1659  *   vsock_connectible_sendmsg
1660  *     virtio_transport_stream_enqueue
1661  *       virtio_transport_send_pkt_info
1662  *         virtio_transport_init_zcopy_skb
1663  *         . msg_zerocopy_realloc
1664  *         .   msg_zerocopy_alloc
1665  *         .     sock_omalloc
1666  *         .       sk_omem_alloc + size > sysctl_optmem_max
1667  *         return -ENOMEM
1668  *
1669  * We abuse the implementation detail of net/socket.c:____sys_sendmsg().
1670  * sk_omem_alloc can be precisely bumped by sock_kmalloc(), as it is used to
1671  * fetch user-provided control data.
1672  *
1673  * While this approach works for now, it relies on assumptions regarding the
1674  * implementation and configuration (for example, order of net.core.optmem_max
1675  * can not exceed MAX_PAGE_ORDER), which may not hold in the future. A more
1676  * resilient testing could be implemented by leveraging the Fault injection
1677  * framework (CONFIG_FAULT_INJECTION), e.g.
1678  *
1679  *   client# echo N > /sys/kernel/debug/failslab/ignore-gfp-wait
1680  *   client# echo 0 > /sys/kernel/debug/failslab/verbose
1681  *
1682  *   void client(const struct test_opts *opts)
1683  *   {
1684  *       char buf[16];
1685  *       int f, s, i;
1686  *
1687  *       f = open("/proc/self/fail-nth", O_WRONLY);
1688  *
1689  *       for (i = 1; i < 32; i++) {
1690  *           control_writeulong(CONTROL_CONTINUE);
1691  *
1692  *           s = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1693  *           enable_so_zerocopy_check(s);
1694  *
1695  *           sprintf(buf, "%d", i);
1696  *           write(f, buf, strlen(buf));
1697  *
1698  *           send(s, &(char){ 0 }, 1, MSG_ZEROCOPY);
1699  *
1700  *           write(f, "0", 1);
1701  *           close(s);
1702  *       }
1703  *
1704  *       control_writeulong(CONTROL_DONE);
1705  *       close(f);
1706  *   }
1707  *
1708  *   void server(const struct test_opts *opts)
1709  *   {
1710  *       int fd;
1711  *
1712  *       while (control_readulong() == CONTROL_CONTINUE) {
1713  *           fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1714  *           vsock_wait_remote_close(fd);
1715  *           close(fd);
1716  *       }
1717  *   }
1718  *
1719  * Refer to Documentation/fault-injection/fault-injection.rst.
1720  */
1721 #define MAX_PAGE_ORDER	10	/* usually */
1722 #define PAGE_SIZE	4096
1723 
1724 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */
test_stream_msgzcopy_leak_zcskb_client(const struct test_opts * opts)1725 static void test_stream_msgzcopy_leak_zcskb_client(const struct test_opts *opts)
1726 {
1727 	size_t optmem_max, ctl_len, chunk_size;
1728 	struct msghdr msg = { 0 };
1729 	struct iovec iov;
1730 	char *chunk;
1731 	int fd, res;
1732 	FILE *f;
1733 
1734 	f = fopen("/proc/sys/net/core/optmem_max", "r");
1735 	if (!f) {
1736 		perror("fopen(optmem_max)");
1737 		exit(EXIT_FAILURE);
1738 	}
1739 
1740 	if (fscanf(f, "%zu", &optmem_max) != 1) {
1741 		fprintf(stderr, "fscanf(optmem_max) failed\n");
1742 		exit(EXIT_FAILURE);
1743 	}
1744 
1745 	fclose(f);
1746 
1747 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
1748 	if (fd < 0) {
1749 		perror("connect");
1750 		exit(EXIT_FAILURE);
1751 	}
1752 
1753 	enable_so_zerocopy_check(fd);
1754 
1755 	ctl_len = optmem_max - 1;
1756 	if (ctl_len > PAGE_SIZE << MAX_PAGE_ORDER) {
1757 		fprintf(stderr, "Try with net.core.optmem_max = 100000\n");
1758 		exit(EXIT_FAILURE);
1759 	}
1760 
1761 	chunk_size = CMSG_SPACE(ctl_len);
1762 	chunk = malloc(chunk_size);
1763 	if (!chunk) {
1764 		perror("malloc");
1765 		exit(EXIT_FAILURE);
1766 	}
1767 	memset(chunk, 0, chunk_size);
1768 
1769 	iov.iov_base = &(char){ 0 };
1770 	iov.iov_len = 1;
1771 
1772 	msg.msg_iov = &iov;
1773 	msg.msg_iovlen = 1;
1774 	msg.msg_control = chunk;
1775 	msg.msg_controllen = ctl_len;
1776 
1777 	errno = 0;
1778 	res = sendmsg(fd, &msg, MSG_ZEROCOPY);
1779 	if (res >= 0 || errno != ENOMEM) {
1780 		fprintf(stderr, "Expected ENOMEM, got errno=%d res=%d\n",
1781 			errno, res);
1782 		exit(EXIT_FAILURE);
1783 	}
1784 
1785 	close(fd);
1786 }
1787 
test_stream_msgzcopy_leak_zcskb_server(const struct test_opts * opts)1788 static void test_stream_msgzcopy_leak_zcskb_server(const struct test_opts *opts)
1789 {
1790 	int fd;
1791 
1792 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1793 	if (fd < 0) {
1794 		perror("accept");
1795 		exit(EXIT_FAILURE);
1796 	}
1797 
1798 	vsock_wait_remote_close(fd);
1799 	close(fd);
1800 }
1801 
1802 #define MAX_PORT_RETRIES	24	/* net/vmw_vsock/af_vsock.c */
1803 
test_stream_transport_uaf(int cid)1804 static bool test_stream_transport_uaf(int cid)
1805 {
1806 	int sockets[MAX_PORT_RETRIES];
1807 	struct sockaddr_vm addr;
1808 	socklen_t alen;
1809 	int fd, i, c;
1810 	bool ret;
1811 
1812 	/* Probe for a transport by attempting a local CID bind. Unavailable
1813 	 * transport (or more specifically: an unsupported transport/CID
1814 	 * combination) results in EADDRNOTAVAIL, other errnos are fatal.
1815 	 */
1816 	fd = vsock_bind_try(cid, VMADDR_PORT_ANY, SOCK_STREAM);
1817 	if (fd < 0) {
1818 		if (errno != EADDRNOTAVAIL) {
1819 			perror("Unexpected bind() errno");
1820 			exit(EXIT_FAILURE);
1821 		}
1822 
1823 		return false;
1824 	}
1825 
1826 	alen = sizeof(addr);
1827 	if (getsockname(fd, (struct sockaddr *)&addr, &alen)) {
1828 		perror("getsockname");
1829 		exit(EXIT_FAILURE);
1830 	}
1831 
1832 	/* Drain the autobind pool; see __vsock_bind_connectible(). */
1833 	for (i = 0; i < MAX_PORT_RETRIES; ++i)
1834 		sockets[i] = vsock_bind(cid, ++addr.svm_port, SOCK_STREAM);
1835 
1836 	close(fd);
1837 
1838 	/* Setting SOCK_NONBLOCK makes connect() return soon after
1839 	 * (re-)assigning the transport. We are not connecting to anything
1840 	 * anyway, so there is no point entering the main loop in
1841 	 * vsock_connect(); waiting for timeout, checking for signals, etc.
1842 	 */
1843 	fd = socket(AF_VSOCK, SOCK_STREAM | SOCK_NONBLOCK, 0);
1844 	if (fd < 0) {
1845 		perror("socket");
1846 		exit(EXIT_FAILURE);
1847 	}
1848 
1849 	/* Assign transport, while failing to autobind. Autobind pool was
1850 	 * drained, so EADDRNOTAVAIL coming from __vsock_bind_connectible() is
1851 	 * expected.
1852 	 *
1853 	 * One exception is ENODEV which is thrown by vsock_assign_transport(),
1854 	 * i.e. before vsock_auto_bind(), when the only transport loaded is
1855 	 * vhost.
1856 	 */
1857 	if (!connect(fd, (struct sockaddr *)&addr, alen)) {
1858 		fprintf(stderr, "Unexpected connect() success\n");
1859 		exit(EXIT_FAILURE);
1860 	}
1861 	if (errno == ENODEV && cid == VMADDR_CID_HOST) {
1862 		ret = false;
1863 		goto cleanup;
1864 	}
1865 	if (errno != EADDRNOTAVAIL) {
1866 		perror("Unexpected connect() errno");
1867 		exit(EXIT_FAILURE);
1868 	}
1869 
1870 	/* Reassign transport, triggering old transport release and
1871 	 * (potentially) unbinding of an unbound socket.
1872 	 *
1873 	 * Vulnerable system may crash now.
1874 	 */
1875 	for (c = VMADDR_CID_HYPERVISOR; c <= VMADDR_CID_HOST + 1; ++c) {
1876 		if (c != cid) {
1877 			addr.svm_cid = c;
1878 			(void)connect(fd, (struct sockaddr *)&addr, alen);
1879 		}
1880 	}
1881 
1882 	ret = true;
1883 cleanup:
1884 	close(fd);
1885 	while (i--)
1886 		close(sockets[i]);
1887 
1888 	return ret;
1889 }
1890 
1891 /* Test attempts to trigger a transport release for an unbound socket. This can
1892  * lead to a reference count mishandling.
1893  */
test_stream_transport_uaf_client(const struct test_opts * opts)1894 static void test_stream_transport_uaf_client(const struct test_opts *opts)
1895 {
1896 	bool tested = false;
1897 	int cid, tr;
1898 
1899 	for (cid = VMADDR_CID_HYPERVISOR; cid <= VMADDR_CID_HOST + 1; ++cid)
1900 		tested |= test_stream_transport_uaf(cid);
1901 
1902 	tr = get_transports();
1903 	if (!tr)
1904 		fprintf(stderr, "No transports detected\n");
1905 	else if (tr == TRANSPORT_VIRTIO)
1906 		fprintf(stderr, "Setup unsupported: sole virtio transport\n");
1907 	else if (!tested)
1908 		fprintf(stderr, "No transports tested\n");
1909 }
1910 
test_stream_connect_retry_client(const struct test_opts * opts)1911 static void test_stream_connect_retry_client(const struct test_opts *opts)
1912 {
1913 	int fd;
1914 
1915 	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
1916 	if (fd < 0) {
1917 		perror("socket");
1918 		exit(EXIT_FAILURE);
1919 	}
1920 
1921 	if (!vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) {
1922 		fprintf(stderr, "Unexpected connect() #1 success\n");
1923 		exit(EXIT_FAILURE);
1924 	}
1925 
1926 	control_writeln("LISTEN");
1927 	control_expectln("LISTENING");
1928 
1929 	if (vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) {
1930 		perror("connect() #2");
1931 		exit(EXIT_FAILURE);
1932 	}
1933 
1934 	close(fd);
1935 }
1936 
test_stream_connect_retry_server(const struct test_opts * opts)1937 static void test_stream_connect_retry_server(const struct test_opts *opts)
1938 {
1939 	int fd;
1940 
1941 	control_expectln("LISTEN");
1942 
1943 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
1944 	if (fd < 0) {
1945 		perror("accept");
1946 		exit(EXIT_FAILURE);
1947 	}
1948 
1949 	vsock_wait_remote_close(fd);
1950 	close(fd);
1951 }
1952 
1953 #define TRANSPORT_CHANGE_TIMEOUT 2 /* seconds */
1954 
test_stream_transport_change_thread(void * vargp)1955 static void *test_stream_transport_change_thread(void *vargp)
1956 {
1957 	pid_t *pid = (pid_t *)vargp;
1958 	int ret;
1959 
1960 	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
1961 	if (ret) {
1962 		fprintf(stderr, "pthread_setcanceltype: %d\n", ret);
1963 		exit(EXIT_FAILURE);
1964 	}
1965 
1966 	while (true) {
1967 		if (kill(*pid, SIGUSR1) < 0) {
1968 			perror("kill");
1969 			exit(EXIT_FAILURE);
1970 		}
1971 	}
1972 	return NULL;
1973 }
1974 
test_transport_change_signal_handler(int signal)1975 static void test_transport_change_signal_handler(int signal)
1976 {
1977 	/* We need a custom handler for SIGUSR1 as the default one terminates the process. */
1978 }
1979 
test_stream_transport_change_client(const struct test_opts * opts)1980 static void test_stream_transport_change_client(const struct test_opts *opts)
1981 {
1982 	__sighandler_t old_handler;
1983 	pid_t pid = getpid();
1984 	pthread_t thread_id;
1985 	time_t tout;
1986 	int ret, tr;
1987 
1988 	tr = get_transports();
1989 
1990 	/* Print a warning if there is a G2H transport loaded.
1991 	 * This is on a best effort basis because VMCI can be either G2H and H2G, and there is
1992 	 * no easy way to understand it.
1993 	 * The bug we are testing only appears when G2H transports are not loaded.
1994 	 * This is because `vsock_assign_transport`, when using CID 0, assigns a G2H transport
1995 	 * to vsk->transport. If none is available it is set to NULL, causing the null-ptr-deref.
1996 	 */
1997 	if (tr & TRANSPORTS_G2H)
1998 		fprintf(stderr, "G2H Transport detected. This test will not fail.\n");
1999 
2000 	old_handler = signal(SIGUSR1, test_transport_change_signal_handler);
2001 	if (old_handler == SIG_ERR) {
2002 		perror("signal");
2003 		exit(EXIT_FAILURE);
2004 	}
2005 
2006 	ret = pthread_create(&thread_id, NULL, test_stream_transport_change_thread, &pid);
2007 	if (ret) {
2008 		fprintf(stderr, "pthread_create: %d\n", ret);
2009 		exit(EXIT_FAILURE);
2010 	}
2011 
2012 	control_expectln("LISTENING");
2013 
2014 	tout = current_nsec() + TRANSPORT_CHANGE_TIMEOUT * NSEC_PER_SEC;
2015 	do {
2016 		struct sockaddr_vm sa = {
2017 			.svm_family = AF_VSOCK,
2018 			.svm_cid = opts->peer_cid,
2019 			.svm_port = opts->peer_port,
2020 		};
2021 		bool send_control = false;
2022 		int s;
2023 
2024 		s = socket(AF_VSOCK, SOCK_STREAM, 0);
2025 		if (s < 0) {
2026 			perror("socket");
2027 			exit(EXIT_FAILURE);
2028 		}
2029 
2030 		/* Although setting SO_LINGER does not affect the original test
2031 		 * for null-ptr-deref, it may trigger a lockdep warning.
2032 		 */
2033 		enable_so_linger(s, 1);
2034 
2035 		ret = connect(s, (struct sockaddr *)&sa, sizeof(sa));
2036 		/* The connect can fail due to signals coming from the thread,
2037 		 * or because the receiver connection queue is full.
2038 		 * Ignoring also the latter case because there is no way
2039 		 * of synchronizing client's connect and server's accept when
2040 		 * connect(s) are constantly being interrupted by signals.
2041 		 */
2042 		if (ret == -1 && (errno != EINTR && errno != ECONNRESET)) {
2043 			perror("connect");
2044 			exit(EXIT_FAILURE);
2045 		}
2046 
2047 		/* Notify the server if the connect() is successful or the
2048 		 * receiver connection queue is full, so it will do accept()
2049 		 * to drain it.
2050 		 */
2051 		if (!ret || errno == ECONNRESET)
2052 			send_control = true;
2053 
2054 		/* Set CID to 0 cause a transport change. */
2055 		sa.svm_cid = 0;
2056 
2057 		/* There is a case where this will not fail:
2058 		 * if the previous connect() is interrupted while the
2059 		 * connection request is already sent, this second
2060 		 * connect() will wait for the response.
2061 		 */
2062 		ret = connect(s, (struct sockaddr *)&sa, sizeof(sa));
2063 		if (!ret || errno == ECONNRESET)
2064 			send_control = true;
2065 
2066 		close(s);
2067 
2068 		if (send_control)
2069 			control_writeulong(CONTROL_CONTINUE);
2070 
2071 	} while (current_nsec() < tout);
2072 
2073 	control_writeulong(CONTROL_DONE);
2074 
2075 	ret = pthread_cancel(thread_id);
2076 	if (ret) {
2077 		fprintf(stderr, "pthread_cancel: %d\n", ret);
2078 		exit(EXIT_FAILURE);
2079 	}
2080 
2081 	ret = pthread_join(thread_id, NULL);
2082 	if (ret) {
2083 		fprintf(stderr, "pthread_join: %d\n", ret);
2084 		exit(EXIT_FAILURE);
2085 	}
2086 
2087 	if (signal(SIGUSR1, old_handler) == SIG_ERR) {
2088 		perror("signal");
2089 		exit(EXIT_FAILURE);
2090 	}
2091 }
2092 
test_stream_transport_change_server(const struct test_opts * opts)2093 static void test_stream_transport_change_server(const struct test_opts *opts)
2094 {
2095 	int s = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);
2096 
2097 	/* Set the socket to be nonblocking because connects that have been interrupted
2098 	 * (EINTR) can fill the receiver's accept queue anyway, leading to connect failure.
2099 	 * As of today (6.15) in such situation there is no way to understand, from the
2100 	 * client side, if the connection has been queued in the server or not.
2101 	 */
2102 	if (fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK) < 0) {
2103 		perror("fcntl");
2104 		exit(EXIT_FAILURE);
2105 	}
2106 	control_writeln("LISTENING");
2107 
2108 	while (control_readulong() == CONTROL_CONTINUE) {
2109 		/* Must accept the connection, otherwise the `listen`
2110 		 * queue will fill up and new connections will fail.
2111 		 * There can be more than one queued connection,
2112 		 * clear them all.
2113 		 */
2114 		while (true) {
2115 			int client = accept(s, NULL, NULL);
2116 
2117 			if (client < 0) {
2118 				if (errno == EAGAIN)
2119 					break;
2120 
2121 				perror("accept");
2122 				exit(EXIT_FAILURE);
2123 			}
2124 
2125 			close(client);
2126 		}
2127 	}
2128 
2129 	close(s);
2130 }
2131 
test_stream_linger_client(const struct test_opts * opts)2132 static void test_stream_linger_client(const struct test_opts *opts)
2133 {
2134 	int fd;
2135 
2136 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
2137 	if (fd < 0) {
2138 		perror("connect");
2139 		exit(EXIT_FAILURE);
2140 	}
2141 
2142 	enable_so_linger(fd, 1);
2143 	close(fd);
2144 }
2145 
test_stream_linger_server(const struct test_opts * opts)2146 static void test_stream_linger_server(const struct test_opts *opts)
2147 {
2148 	int fd;
2149 
2150 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
2151 	if (fd < 0) {
2152 		perror("accept");
2153 		exit(EXIT_FAILURE);
2154 	}
2155 
2156 	vsock_wait_remote_close(fd);
2157 	close(fd);
2158 }
2159 
2160 /* Half of the default to not risk timing out the control channel */
2161 #define LINGER_TIMEOUT	(TIMEOUT / 2)
2162 
test_stream_nolinger_client(const struct test_opts * opts)2163 static void test_stream_nolinger_client(const struct test_opts *opts)
2164 {
2165 	bool waited;
2166 	time_t ns;
2167 	int fd;
2168 
2169 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
2170 	if (fd < 0) {
2171 		perror("connect");
2172 		exit(EXIT_FAILURE);
2173 	}
2174 
2175 	enable_so_linger(fd, LINGER_TIMEOUT);
2176 	send_byte(fd, 1, 0); /* Left unread to expose incorrect behaviour. */
2177 	waited = vsock_wait_sent(fd);
2178 
2179 	ns = current_nsec();
2180 	close(fd);
2181 	ns = current_nsec() - ns;
2182 
2183 	if (!waited) {
2184 		fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
2185 	} else if (DIV_ROUND_UP(ns, NSEC_PER_SEC) >= LINGER_TIMEOUT) {
2186 		fprintf(stderr, "Unexpected lingering\n");
2187 		exit(EXIT_FAILURE);
2188 	}
2189 
2190 	control_writeln("DONE");
2191 }
2192 
test_stream_nolinger_server(const struct test_opts * opts)2193 static void test_stream_nolinger_server(const struct test_opts *opts)
2194 {
2195 	int fd;
2196 
2197 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
2198 	if (fd < 0) {
2199 		perror("accept");
2200 		exit(EXIT_FAILURE);
2201 	}
2202 
2203 	control_expectln("DONE");
2204 	close(fd);
2205 }
2206 
test_stream_accepted_setsockopt_client(const struct test_opts * opts)2207 static void test_stream_accepted_setsockopt_client(const struct test_opts *opts)
2208 {
2209 	int fd;
2210 
2211 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
2212 	if (fd < 0) {
2213 		perror("connect");
2214 		exit(EXIT_FAILURE);
2215 	}
2216 
2217 	close(fd);
2218 }
2219 
test_stream_accepted_setsockopt_server(const struct test_opts * opts)2220 static void test_stream_accepted_setsockopt_server(const struct test_opts *opts)
2221 {
2222 	int fd;
2223 
2224 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
2225 	if (fd < 0) {
2226 		perror("accept");
2227 		exit(EXIT_FAILURE);
2228 	}
2229 
2230 	enable_so_zerocopy_check(fd);
2231 	close(fd);
2232 }
2233 
test_stream_tx_credit_bounds_client(const struct test_opts * opts)2234 static void test_stream_tx_credit_bounds_client(const struct test_opts *opts)
2235 {
2236 	unsigned long long sock_buf_size;
2237 	size_t total = 0;
2238 	char buf[4096];
2239 	int fd;
2240 
2241 	memset(buf, 'A', sizeof(buf));
2242 
2243 	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
2244 	if (fd < 0) {
2245 		perror("connect");
2246 		exit(EXIT_FAILURE);
2247 	}
2248 
2249 	sock_buf_size = SOCK_BUF_SIZE_SMALL;
2250 
2251 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
2252 			     sock_buf_size,
2253 			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
2254 
2255 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
2256 			     sock_buf_size,
2257 			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
2258 
2259 	if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK) < 0) {
2260 		perror("fcntl(F_SETFL)");
2261 		exit(EXIT_FAILURE);
2262 	}
2263 
2264 	control_expectln("SRVREADY");
2265 
2266 	for (;;) {
2267 		ssize_t sent = send(fd, buf, sizeof(buf), 0);
2268 
2269 		if (sent == 0) {
2270 			fprintf(stderr, "unexpected EOF while sending bytes\n");
2271 			exit(EXIT_FAILURE);
2272 		}
2273 
2274 		if (sent < 0) {
2275 			if (errno == EINTR)
2276 				continue;
2277 
2278 			if (errno == EAGAIN || errno == EWOULDBLOCK)
2279 				break;
2280 
2281 			perror("send");
2282 			exit(EXIT_FAILURE);
2283 		}
2284 
2285 		total += sent;
2286 	}
2287 
2288 	control_writeln("CLIDONE");
2289 	close(fd);
2290 
2291 	/* We should not be able to send more bytes than the value set as
2292 	 * local buffer size.
2293 	 */
2294 	if (total > sock_buf_size) {
2295 		fprintf(stderr,
2296 			"TX credit too large: queued %zu bytes (expected <= %llu)\n",
2297 			total, sock_buf_size);
2298 		exit(EXIT_FAILURE);
2299 	}
2300 }
2301 
test_stream_tx_credit_bounds_server(const struct test_opts * opts)2302 static void test_stream_tx_credit_bounds_server(const struct test_opts *opts)
2303 {
2304 	unsigned long long sock_buf_size;
2305 	int fd;
2306 
2307 	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
2308 	if (fd < 0) {
2309 		perror("accept");
2310 		exit(EXIT_FAILURE);
2311 	}
2312 
2313 	sock_buf_size = SOCK_BUF_SIZE;
2314 
2315 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
2316 			     sock_buf_size,
2317 			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
2318 
2319 	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
2320 			     sock_buf_size,
2321 			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
2322 
2323 	control_writeln("SRVREADY");
2324 	control_expectln("CLIDONE");
2325 
2326 	close(fd);
2327 }
2328 
2329 static struct test_case test_cases[] = {
2330 	{
2331 		.name = "SOCK_STREAM connection reset",
2332 		.run_client = test_stream_connection_reset,
2333 	},
2334 	{
2335 		.name = "SOCK_STREAM bind only",
2336 		.run_client = test_stream_bind_only_client,
2337 		.run_server = test_stream_bind_only_server,
2338 	},
2339 	{
2340 		.name = "SOCK_STREAM client close",
2341 		.run_client = test_stream_client_close_client,
2342 		.run_server = test_stream_client_close_server,
2343 	},
2344 	{
2345 		.name = "SOCK_STREAM server close",
2346 		.run_client = test_stream_server_close_client,
2347 		.run_server = test_stream_server_close_server,
2348 	},
2349 	{
2350 		.name = "SOCK_STREAM multiple connections",
2351 		.run_client = test_stream_multiconn_client,
2352 		.run_server = test_stream_multiconn_server,
2353 	},
2354 	{
2355 		.name = "SOCK_STREAM MSG_PEEK",
2356 		.run_client = test_stream_msg_peek_client,
2357 		.run_server = test_stream_msg_peek_server,
2358 	},
2359 	{
2360 		.name = "SOCK_SEQPACKET msg bounds",
2361 		.run_client = test_seqpacket_msg_bounds_client,
2362 		.run_server = test_seqpacket_msg_bounds_server,
2363 	},
2364 	{
2365 		.name = "SOCK_SEQPACKET MSG_TRUNC flag",
2366 		.run_client = test_seqpacket_msg_trunc_client,
2367 		.run_server = test_seqpacket_msg_trunc_server,
2368 	},
2369 	{
2370 		.name = "SOCK_SEQPACKET timeout",
2371 		.run_client = test_seqpacket_timeout_client,
2372 		.run_server = test_seqpacket_timeout_server,
2373 	},
2374 	{
2375 		.name = "SOCK_SEQPACKET invalid receive buffer",
2376 		.run_client = test_seqpacket_invalid_rec_buffer_client,
2377 		.run_server = test_seqpacket_invalid_rec_buffer_server,
2378 	},
2379 	{
2380 		.name = "SOCK_STREAM poll() + SO_RCVLOWAT",
2381 		.run_client = test_stream_poll_rcvlowat_client,
2382 		.run_server = test_stream_poll_rcvlowat_server,
2383 	},
2384 	{
2385 		.name = "SOCK_SEQPACKET big message",
2386 		.run_client = test_seqpacket_bigmsg_client,
2387 		.run_server = test_seqpacket_bigmsg_server,
2388 	},
2389 	{
2390 		.name = "SOCK_STREAM test invalid buffer",
2391 		.run_client = test_stream_inv_buf_client,
2392 		.run_server = test_stream_inv_buf_server,
2393 	},
2394 	{
2395 		.name = "SOCK_SEQPACKET test invalid buffer",
2396 		.run_client = test_seqpacket_inv_buf_client,
2397 		.run_server = test_seqpacket_inv_buf_server,
2398 	},
2399 	{
2400 		.name = "SOCK_STREAM virtio skb merge",
2401 		.run_client = test_stream_virtio_skb_merge_client,
2402 		.run_server = test_stream_virtio_skb_merge_server,
2403 	},
2404 	{
2405 		.name = "SOCK_SEQPACKET MSG_PEEK",
2406 		.run_client = test_seqpacket_msg_peek_client,
2407 		.run_server = test_seqpacket_msg_peek_server,
2408 	},
2409 	{
2410 		.name = "SOCK_STREAM SHUT_WR",
2411 		.run_client = test_stream_shutwr_client,
2412 		.run_server = test_stream_shutwr_server,
2413 	},
2414 	{
2415 		.name = "SOCK_STREAM SHUT_RD",
2416 		.run_client = test_stream_shutrd_client,
2417 		.run_server = test_stream_shutrd_server,
2418 	},
2419 	{
2420 		.name = "SOCK_STREAM MSG_ZEROCOPY",
2421 		.run_client = test_stream_msgzcopy_client,
2422 		.run_server = test_stream_msgzcopy_server,
2423 	},
2424 	{
2425 		.name = "SOCK_SEQPACKET MSG_ZEROCOPY",
2426 		.run_client = test_seqpacket_msgzcopy_client,
2427 		.run_server = test_seqpacket_msgzcopy_server,
2428 	},
2429 	{
2430 		.name = "SOCK_STREAM MSG_ZEROCOPY empty MSG_ERRQUEUE",
2431 		.run_client = test_stream_msgzcopy_empty_errq_client,
2432 		.run_server = test_stream_msgzcopy_empty_errq_server,
2433 	},
2434 	{
2435 		.name = "SOCK_STREAM double bind connect",
2436 		.run_client = test_double_bind_connect_client,
2437 		.run_server = test_double_bind_connect_server,
2438 	},
2439 	{
2440 		.name = "SOCK_STREAM virtio credit update + SO_RCVLOWAT",
2441 		.run_client = test_stream_rcvlowat_def_cred_upd_client,
2442 		.run_server = test_stream_cred_upd_on_set_rcvlowat,
2443 	},
2444 	{
2445 		.name = "SOCK_STREAM virtio credit update + low rx_bytes",
2446 		.run_client = test_stream_rcvlowat_def_cred_upd_client,
2447 		.run_server = test_stream_cred_upd_on_low_rx_bytes,
2448 	},
2449 	{
2450 		.name = "SOCK_STREAM ioctl(SIOCOUTQ) 0 unsent bytes",
2451 		.run_client = test_stream_unsent_bytes_client,
2452 		.run_server = test_stream_unsent_bytes_server,
2453 	},
2454 	{
2455 		.name = "SOCK_SEQPACKET ioctl(SIOCOUTQ) 0 unsent bytes",
2456 		.run_client = test_seqpacket_unsent_bytes_client,
2457 		.run_server = test_seqpacket_unsent_bytes_server,
2458 	},
2459 	{
2460 		.name = "SOCK_STREAM leak accept queue",
2461 		.run_client = test_stream_leak_acceptq_client,
2462 		.run_server = test_stream_leak_acceptq_server,
2463 	},
2464 	{
2465 		.name = "SOCK_STREAM MSG_ZEROCOPY leak MSG_ERRQUEUE",
2466 		.run_client = test_stream_msgzcopy_leak_errq_client,
2467 		.run_server = test_stream_msgzcopy_leak_errq_server,
2468 	},
2469 	{
2470 		.name = "SOCK_STREAM MSG_ZEROCOPY leak completion skb",
2471 		.run_client = test_stream_msgzcopy_leak_zcskb_client,
2472 		.run_server = test_stream_msgzcopy_leak_zcskb_server,
2473 	},
2474 	{
2475 		.name = "SOCK_STREAM transport release use-after-free",
2476 		.run_client = test_stream_transport_uaf_client,
2477 	},
2478 	{
2479 		.name = "SOCK_STREAM retry failed connect()",
2480 		.run_client = test_stream_connect_retry_client,
2481 		.run_server = test_stream_connect_retry_server,
2482 	},
2483 	{
2484 		.name = "SOCK_STREAM SO_LINGER null-ptr-deref",
2485 		.run_client = test_stream_linger_client,
2486 		.run_server = test_stream_linger_server,
2487 	},
2488 	{
2489 		.name = "SOCK_STREAM SO_LINGER close() on unread",
2490 		.run_client = test_stream_nolinger_client,
2491 		.run_server = test_stream_nolinger_server,
2492 	},
2493 	{
2494 		.name = "SOCK_STREAM transport change null-ptr-deref, lockdep warn",
2495 		.run_client = test_stream_transport_change_client,
2496 		.run_server = test_stream_transport_change_server,
2497 	},
2498 	{
2499 		.name = "SOCK_STREAM ioctl(SIOCINQ) functionality",
2500 		.run_client = test_stream_unread_bytes_client,
2501 		.run_server = test_stream_unread_bytes_server,
2502 	},
2503 	{
2504 		.name = "SOCK_SEQPACKET ioctl(SIOCINQ) functionality",
2505 		.run_client = test_seqpacket_unread_bytes_client,
2506 		.run_server = test_seqpacket_unread_bytes_server,
2507 	},
2508 	{
2509 		.name = "SOCK_STREAM accept()ed socket custom setsockopt()",
2510 		.run_client = test_stream_accepted_setsockopt_client,
2511 		.run_server = test_stream_accepted_setsockopt_server,
2512 	},
2513 	{
2514 		.name = "SOCK_STREAM virtio MSG_ZEROCOPY coalescence corruption",
2515 		.run_client = test_stream_msgzcopy_mangle_client,
2516 		.run_server = test_stream_msgzcopy_mangle_server,
2517 	},
2518 	{
2519 		.name = "SOCK_STREAM TX credit bounds",
2520 		.run_client = test_stream_tx_credit_bounds_client,
2521 		.run_server = test_stream_tx_credit_bounds_server,
2522 	},
2523 	{},
2524 };
2525 
2526 static const char optstring[] = "";
2527 static const struct option longopts[] = {
2528 	{
2529 		.name = "control-host",
2530 		.has_arg = required_argument,
2531 		.val = 'H',
2532 	},
2533 	{
2534 		.name = "control-port",
2535 		.has_arg = required_argument,
2536 		.val = 'P',
2537 	},
2538 	{
2539 		.name = "mode",
2540 		.has_arg = required_argument,
2541 		.val = 'm',
2542 	},
2543 	{
2544 		.name = "peer-cid",
2545 		.has_arg = required_argument,
2546 		.val = 'p',
2547 	},
2548 	{
2549 		.name = "peer-port",
2550 		.has_arg = required_argument,
2551 		.val = 'q',
2552 	},
2553 	{
2554 		.name = "list",
2555 		.has_arg = no_argument,
2556 		.val = 'l',
2557 	},
2558 	{
2559 		.name = "skip",
2560 		.has_arg = required_argument,
2561 		.val = 's',
2562 	},
2563 	{
2564 		.name = "pick",
2565 		.has_arg = required_argument,
2566 		.val = 't',
2567 	},
2568 	{
2569 		.name = "help",
2570 		.has_arg = no_argument,
2571 		.val = '?',
2572 	},
2573 	{},
2574 };
2575 
usage(void)2576 static void usage(void)
2577 {
2578 	fprintf(stderr, "Usage: vsock_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--peer-port=<port>] [--list] [--skip=<test_id>]\n"
2579 		"\n"
2580 		"  Server: vsock_test --control-port=1234 --mode=server --peer-cid=3\n"
2581 		"  Client: vsock_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n"
2582 		"\n"
2583 		"Run vsock.ko tests.  Must be launched in both guest\n"
2584 		"and host.  One side must use --mode=client and\n"
2585 		"the other side must use --mode=server.\n"
2586 		"\n"
2587 		"A TCP control socket connection is used to coordinate tests\n"
2588 		"between the client and the server.  The server requires a\n"
2589 		"listen address and the client requires an address to\n"
2590 		"connect to.\n"
2591 		"\n"
2592 		"The CID of the other side must be given with --peer-cid=<cid>.\n"
2593 		"During the test, two AF_VSOCK ports will be used: the port\n"
2594 		"specified with --peer-port=<port> (or the default port)\n"
2595 		"and the next one.\n"
2596 		"\n"
2597 		"Options:\n"
2598 		"  --help                 This help message\n"
2599 		"  --control-host <host>  Server IP address to connect to\n"
2600 		"  --control-port <port>  Server port to listen on/connect to\n"
2601 		"  --mode client|server   Server or client mode\n"
2602 		"  --peer-cid <cid>       CID of the other side\n"
2603 		"  --peer-port <port>     AF_VSOCK port used for the test [default: %d]\n"
2604 		"  --list                 List of tests that will be executed\n"
2605 		"  --pick <test_id>       Test ID to execute selectively;\n"
2606 		"                         use multiple --pick options to select more tests\n"
2607 		"  --skip <test_id>       Test ID to skip;\n"
2608 		"                         use multiple --skip options to skip more tests\n",
2609 		DEFAULT_PEER_PORT
2610 		);
2611 	exit(EXIT_FAILURE);
2612 }
2613 
main(int argc,char ** argv)2614 int main(int argc, char **argv)
2615 {
2616 	const char *control_host = NULL;
2617 	const char *control_port = NULL;
2618 	struct test_opts opts = {
2619 		.mode = TEST_MODE_UNSET,
2620 		.peer_cid = VMADDR_CID_ANY,
2621 		.peer_port = DEFAULT_PEER_PORT,
2622 	};
2623 
2624 	srand(time(NULL));
2625 	init_signals();
2626 
2627 	for (;;) {
2628 		int opt = getopt_long(argc, argv, optstring, longopts, NULL);
2629 
2630 		if (opt == -1)
2631 			break;
2632 
2633 		switch (opt) {
2634 		case 'H':
2635 			control_host = optarg;
2636 			break;
2637 		case 'm':
2638 			if (strcmp(optarg, "client") == 0)
2639 				opts.mode = TEST_MODE_CLIENT;
2640 			else if (strcmp(optarg, "server") == 0)
2641 				opts.mode = TEST_MODE_SERVER;
2642 			else {
2643 				fprintf(stderr, "--mode must be \"client\" or \"server\"\n");
2644 				return EXIT_FAILURE;
2645 			}
2646 			break;
2647 		case 'p':
2648 			opts.peer_cid = parse_cid(optarg);
2649 			break;
2650 		case 'q':
2651 			opts.peer_port = parse_port(optarg);
2652 			break;
2653 		case 'P':
2654 			control_port = optarg;
2655 			break;
2656 		case 'l':
2657 			list_tests(test_cases);
2658 			break;
2659 		case 's':
2660 			skip_test(test_cases, ARRAY_SIZE(test_cases) - 1,
2661 				  optarg);
2662 			break;
2663 		case 't':
2664 			pick_test(test_cases, ARRAY_SIZE(test_cases) - 1,
2665 				  optarg);
2666 			break;
2667 		case '?':
2668 		default:
2669 			usage();
2670 		}
2671 	}
2672 
2673 	if (!control_port)
2674 		usage();
2675 	if (opts.mode == TEST_MODE_UNSET)
2676 		usage();
2677 	if (opts.peer_cid == VMADDR_CID_ANY)
2678 		usage();
2679 
2680 	if (!control_host) {
2681 		if (opts.mode != TEST_MODE_SERVER)
2682 			usage();
2683 		control_host = "0.0.0.0";
2684 	}
2685 
2686 	control_init(control_host, control_port,
2687 		     opts.mode == TEST_MODE_SERVER);
2688 
2689 	run_tests(test_cases, &opts);
2690 
2691 	control_cleanup();
2692 	return EXIT_SUCCESS;
2693 }
2694