1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * vsock_test - vsock.ko test suite 4 * 5 * Copyright (C) 2017 Red Hat, Inc. 6 * 7 * Author: Stefan Hajnoczi <stefanha@redhat.com> 8 */ 9 10 #include <getopt.h> 11 #include <stdio.h> 12 #include <stdlib.h> 13 #include <string.h> 14 #include <errno.h> 15 #include <unistd.h> 16 #include <linux/kernel.h> 17 #include <sys/types.h> 18 #include <sys/socket.h> 19 #include <time.h> 20 #include <sys/mman.h> 21 #include <poll.h> 22 #include <signal.h> 23 #include <sys/ioctl.h> 24 #include <linux/time64.h> 25 #include <pthread.h> 26 #include <fcntl.h> 27 28 #include "vsock_test_zerocopy.h" 29 #include "timeout.h" 30 #include "control.h" 31 #include "util.h" 32 33 /* Basic messages for control_writeulong(), control_readulong() */ 34 #define CONTROL_CONTINUE 1 35 #define CONTROL_DONE 0 36 37 static void test_stream_connection_reset(const struct test_opts *opts) 38 { 39 union { 40 struct sockaddr sa; 41 struct sockaddr_vm svm; 42 } addr = { 43 .svm = { 44 .svm_family = AF_VSOCK, 45 .svm_port = opts->peer_port, 46 .svm_cid = opts->peer_cid, 47 }, 48 }; 49 int ret; 50 int fd; 51 52 fd = socket(AF_VSOCK, SOCK_STREAM, 0); 53 54 timeout_begin(TIMEOUT); 55 do { 56 ret = connect(fd, &addr.sa, sizeof(addr.svm)); 57 timeout_check("connect"); 58 } while (ret < 0 && errno == EINTR); 59 timeout_end(); 60 61 if (ret != -1) { 62 fprintf(stderr, "expected connect(2) failure, got %d\n", ret); 63 exit(EXIT_FAILURE); 64 } 65 if (errno != ECONNRESET) { 66 fprintf(stderr, "unexpected connect(2) errno %d\n", errno); 67 exit(EXIT_FAILURE); 68 } 69 70 close(fd); 71 } 72 73 static void test_stream_bind_only_client(const struct test_opts *opts) 74 { 75 union { 76 struct sockaddr sa; 77 struct sockaddr_vm svm; 78 } addr = { 79 .svm = { 80 .svm_family = AF_VSOCK, 81 .svm_port = opts->peer_port, 82 .svm_cid = opts->peer_cid, 83 }, 84 }; 85 int ret; 86 int fd; 87 88 /* Wait for the server to be ready */ 89 control_expectln("BIND"); 90 91 fd = socket(AF_VSOCK, SOCK_STREAM, 0); 92 93 timeout_begin(TIMEOUT); 94 do { 95 ret = connect(fd, &addr.sa, sizeof(addr.svm)); 96 timeout_check("connect"); 97 } while (ret < 0 && errno == EINTR); 98 timeout_end(); 99 100 if (ret != -1) { 101 fprintf(stderr, "expected connect(2) failure, got %d\n", ret); 102 exit(EXIT_FAILURE); 103 } 104 if (errno != ECONNRESET) { 105 fprintf(stderr, "unexpected connect(2) errno %d\n", errno); 106 exit(EXIT_FAILURE); 107 } 108 109 /* Notify the server that the client has finished */ 110 control_writeln("DONE"); 111 112 close(fd); 113 } 114 115 static void test_stream_bind_only_server(const struct test_opts *opts) 116 { 117 int fd; 118 119 fd = vsock_bind(VMADDR_CID_ANY, opts->peer_port, SOCK_STREAM); 120 121 /* Notify the client that the server is ready */ 122 control_writeln("BIND"); 123 124 /* Wait for the client to finish */ 125 control_expectln("DONE"); 126 127 close(fd); 128 } 129 130 static void test_stream_client_close_client(const struct test_opts *opts) 131 { 132 int fd; 133 134 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 135 if (fd < 0) { 136 perror("connect"); 137 exit(EXIT_FAILURE); 138 } 139 140 send_byte(fd, 1, 0); 141 close(fd); 142 } 143 144 static void test_stream_client_close_server(const struct test_opts *opts) 145 { 146 int fd; 147 148 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 149 if (fd < 0) { 150 perror("accept"); 151 exit(EXIT_FAILURE); 152 } 153 154 /* Wait for the remote to close the connection, before check 155 * -EPIPE error on send. 
156 */ 157 vsock_wait_remote_close(fd); 158 159 send_byte(fd, -EPIPE, 0); 160 recv_byte(fd, 1, 0); 161 recv_byte(fd, 0, 0); 162 close(fd); 163 } 164 165 static void test_stream_server_close_client(const struct test_opts *opts) 166 { 167 int fd; 168 169 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 170 if (fd < 0) { 171 perror("connect"); 172 exit(EXIT_FAILURE); 173 } 174 175 /* Wait for the remote to close the connection, before check 176 * -EPIPE error on send. 177 */ 178 vsock_wait_remote_close(fd); 179 180 send_byte(fd, -EPIPE, 0); 181 recv_byte(fd, 1, 0); 182 recv_byte(fd, 0, 0); 183 close(fd); 184 } 185 186 static void test_stream_server_close_server(const struct test_opts *opts) 187 { 188 int fd; 189 190 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 191 if (fd < 0) { 192 perror("accept"); 193 exit(EXIT_FAILURE); 194 } 195 196 send_byte(fd, 1, 0); 197 close(fd); 198 } 199 200 /* With the standard socket sizes, VMCI is able to support about 100 201 * concurrent stream connections. 202 */ 203 #define MULTICONN_NFDS 100 204 205 static void test_stream_multiconn_client(const struct test_opts *opts) 206 { 207 int fds[MULTICONN_NFDS]; 208 int i; 209 210 for (i = 0; i < MULTICONN_NFDS; i++) { 211 fds[i] = vsock_stream_connect(opts->peer_cid, opts->peer_port); 212 if (fds[i] < 0) { 213 perror("connect"); 214 exit(EXIT_FAILURE); 215 } 216 } 217 218 for (i = 0; i < MULTICONN_NFDS; i++) { 219 if (i % 2) 220 recv_byte(fds[i], 1, 0); 221 else 222 send_byte(fds[i], 1, 0); 223 } 224 225 for (i = 0; i < MULTICONN_NFDS; i++) 226 close(fds[i]); 227 } 228 229 static void test_stream_multiconn_server(const struct test_opts *opts) 230 { 231 int fds[MULTICONN_NFDS]; 232 int i; 233 234 for (i = 0; i < MULTICONN_NFDS; i++) { 235 fds[i] = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 236 if (fds[i] < 0) { 237 perror("accept"); 238 exit(EXIT_FAILURE); 239 } 240 } 241 242 for (i = 0; i < MULTICONN_NFDS; i++) { 243 if (i % 2) 244 send_byte(fds[i], 1, 0); 245 else 246 recv_byte(fds[i], 1, 0); 247 } 248 249 for (i = 0; i < MULTICONN_NFDS; i++) 250 close(fds[i]); 251 } 252 253 #define MSG_PEEK_BUF_LEN 64 254 255 static void test_msg_peek_client(const struct test_opts *opts, 256 bool seqpacket) 257 { 258 unsigned char buf[MSG_PEEK_BUF_LEN]; 259 int fd; 260 int i; 261 262 if (seqpacket) 263 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 264 else 265 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 266 267 if (fd < 0) { 268 perror("connect"); 269 exit(EXIT_FAILURE); 270 } 271 272 for (i = 0; i < sizeof(buf); i++) 273 buf[i] = rand() & 0xFF; 274 275 control_expectln("SRVREADY"); 276 277 send_buf(fd, buf, sizeof(buf), 0, sizeof(buf)); 278 279 close(fd); 280 } 281 282 static void test_msg_peek_server(const struct test_opts *opts, 283 bool seqpacket) 284 { 285 unsigned char buf_half[MSG_PEEK_BUF_LEN / 2]; 286 unsigned char buf_normal[MSG_PEEK_BUF_LEN]; 287 unsigned char buf_peek[MSG_PEEK_BUF_LEN]; 288 int fd; 289 290 if (seqpacket) 291 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 292 else 293 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 294 295 if (fd < 0) { 296 perror("accept"); 297 exit(EXIT_FAILURE); 298 } 299 300 /* Peek from empty socket. */ 301 recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK | MSG_DONTWAIT, 302 -EAGAIN); 303 304 control_writeln("SRVREADY"); 305 306 /* Peek part of data. */ 307 recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK, sizeof(buf_half)); 308 309 /* Peek whole data. 
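	 * MSG_PEEK copies data without dequeuing it, so the normal read at
	 * the end of this function must still return the complete buffer.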
 */
	recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK, sizeof(buf_peek));

	/* Compare partial and full peek. */
	if (memcmp(buf_half, buf_peek, sizeof(buf_half))) {
		fprintf(stderr, "Partial peek data mismatch\n");
		exit(EXIT_FAILURE);
	}

	if (seqpacket) {
		/* This type of socket supports the MSG_TRUNC flag, so check
		 * it with MSG_PEEK. We must get the length of the whole
		 * message.
		 */
		recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK | MSG_TRUNC,
			 sizeof(buf_peek));
	}

	recv_buf(fd, buf_normal, sizeof(buf_normal), 0, sizeof(buf_normal));

	/* Compare full peek and normal read. */
	if (memcmp(buf_peek, buf_normal, sizeof(buf_peek))) {
		fprintf(stderr, "Full peek data mismatch\n");
		exit(EXIT_FAILURE);
	}

	close(fd);
}

static void test_stream_msg_peek_client(const struct test_opts *opts)
{
	return test_msg_peek_client(opts, false);
}

static void test_stream_msg_peek_server(const struct test_opts *opts)
{
	return test_msg_peek_server(opts, false);
}

#define SOCK_BUF_SIZE (2 * 1024 * 1024)
#define MAX_MSG_PAGES 4

static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
{
	unsigned long curr_hash;
	size_t max_msg_size;
	int page_size;
	int msg_count;
	int fd;

	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	/* Wait until the receiver sets the buffer size. */
	control_expectln("SRVREADY");

	curr_hash = 0;
	page_size = getpagesize();
	max_msg_size = MAX_MSG_PAGES * page_size;
	msg_count = SOCK_BUF_SIZE / max_msg_size;

	for (int i = 0; i < msg_count; i++) {
		size_t buf_size;
		int flags;
		void *buf;

		/* Use "small" buffers and "big" buffers. */
		if (i & 1)
			buf_size = page_size +
					(rand() % (max_msg_size - page_size));
		else
			buf_size = 1 + (rand() % page_size);

		buf = malloc(buf_size);

		if (!buf) {
			perror("malloc");
			exit(EXIT_FAILURE);
		}

		memset(buf, rand() & 0xff, buf_size);
		/* Set MSG_EOR on at least one message, plus a random subset. */
		if (i == (msg_count / 2) || (rand() & 1)) {
			flags = MSG_EOR;
			curr_hash++;
		} else {
			flags = 0;
		}

		send_buf(fd, buf, buf_size, flags, buf_size);

		/*
		 * Hash sum is computed at both client and server in
		 * the same way:
		 * H += hash('message data')
		 * Such a hash "controls" both data integrity and message
		 * bounds. After the data exchange, both sums are compared
		 * over the control socket; if message bounds were not
		 * broken, the two values must be equal.
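		 *
		 * For example (illustration only): if the sender transmits
		 * "AB" and "C" as two MSG_EOR-terminated messages, it ends
		 * up with H = 2 + hash("AB") + hash("C"); a receiver that
		 * saw a single merged "ABC" record would compute
		 * H = 1 + hash("ABC"), so broken bounds show up as a sum
		 * mismatch even though the raw bytes match.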
411 */ 412 curr_hash += hash_djb2(buf, buf_size); 413 free(buf); 414 } 415 416 control_writeln("SENDDONE"); 417 control_writeulong(curr_hash); 418 close(fd); 419 } 420 421 static void test_seqpacket_msg_bounds_server(const struct test_opts *opts) 422 { 423 unsigned long long sock_buf_size; 424 unsigned long remote_hash; 425 unsigned long curr_hash; 426 int fd; 427 struct msghdr msg = {0}; 428 struct iovec iov = {0}; 429 430 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 431 if (fd < 0) { 432 perror("accept"); 433 exit(EXIT_FAILURE); 434 } 435 436 sock_buf_size = SOCK_BUF_SIZE; 437 438 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE, 439 sock_buf_size, 440 "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)"); 441 442 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, 443 sock_buf_size, 444 "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); 445 446 /* Ready to receive data. */ 447 control_writeln("SRVREADY"); 448 /* Wait, until peer sends whole data. */ 449 control_expectln("SENDDONE"); 450 iov.iov_len = MAX_MSG_PAGES * getpagesize(); 451 iov.iov_base = malloc(iov.iov_len); 452 if (!iov.iov_base) { 453 perror("malloc"); 454 exit(EXIT_FAILURE); 455 } 456 457 msg.msg_iov = &iov; 458 msg.msg_iovlen = 1; 459 460 curr_hash = 0; 461 462 while (1) { 463 ssize_t recv_size; 464 465 recv_size = recvmsg(fd, &msg, 0); 466 467 if (!recv_size) 468 break; 469 470 if (recv_size < 0) { 471 perror("recvmsg"); 472 exit(EXIT_FAILURE); 473 } 474 475 if (msg.msg_flags & MSG_EOR) 476 curr_hash++; 477 478 curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size); 479 } 480 481 free(iov.iov_base); 482 close(fd); 483 remote_hash = control_readulong(); 484 485 if (curr_hash != remote_hash) { 486 fprintf(stderr, "Message bounds broken\n"); 487 exit(EXIT_FAILURE); 488 } 489 } 490 491 #define MESSAGE_TRUNC_SZ 32 492 static void test_seqpacket_msg_trunc_client(const struct test_opts *opts) 493 { 494 int fd; 495 char buf[MESSAGE_TRUNC_SZ]; 496 497 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 498 if (fd < 0) { 499 perror("connect"); 500 exit(EXIT_FAILURE); 501 } 502 503 send_buf(fd, buf, sizeof(buf), 0, sizeof(buf)); 504 505 control_writeln("SENDDONE"); 506 close(fd); 507 } 508 509 static void test_seqpacket_msg_trunc_server(const struct test_opts *opts) 510 { 511 int fd; 512 char buf[MESSAGE_TRUNC_SZ / 2]; 513 struct msghdr msg = {0}; 514 struct iovec iov = {0}; 515 516 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 517 if (fd < 0) { 518 perror("accept"); 519 exit(EXIT_FAILURE); 520 } 521 522 control_expectln("SENDDONE"); 523 iov.iov_base = buf; 524 iov.iov_len = sizeof(buf); 525 msg.msg_iov = &iov; 526 msg.msg_iovlen = 1; 527 528 ssize_t ret = recvmsg(fd, &msg, MSG_TRUNC); 529 530 if (ret != MESSAGE_TRUNC_SZ) { 531 printf("%zi\n", ret); 532 perror("MSG_TRUNC doesn't work"); 533 exit(EXIT_FAILURE); 534 } 535 536 if (!(msg.msg_flags & MSG_TRUNC)) { 537 fprintf(stderr, "MSG_TRUNC expected\n"); 538 exit(EXIT_FAILURE); 539 } 540 541 close(fd); 542 } 543 544 static time_t current_nsec(void) 545 { 546 struct timespec ts; 547 548 if (clock_gettime(CLOCK_REALTIME, &ts)) { 549 perror("clock_gettime(3) failed"); 550 exit(EXIT_FAILURE); 551 } 552 553 return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec; 554 } 555 556 #define RCVTIMEO_TIMEOUT_SEC 1 557 #define READ_OVERHEAD_NSEC 250000000 /* 0.25 sec */ 558 559 static void test_seqpacket_timeout_client(const struct test_opts *opts) 560 { 561 int fd; 562 struct timeval tv; 563 char dummy; 564 time_t read_enter_ns; 565 
time_t read_overhead_ns; 566 567 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 568 if (fd < 0) { 569 perror("connect"); 570 exit(EXIT_FAILURE); 571 } 572 573 tv.tv_sec = RCVTIMEO_TIMEOUT_SEC; 574 tv.tv_usec = 0; 575 576 setsockopt_timeval_check(fd, SOL_SOCKET, SO_RCVTIMEO, tv, 577 "setsockopt(SO_RCVTIMEO)"); 578 579 read_enter_ns = current_nsec(); 580 581 if (read(fd, &dummy, sizeof(dummy)) != -1) { 582 fprintf(stderr, 583 "expected 'dummy' read(2) failure\n"); 584 exit(EXIT_FAILURE); 585 } 586 587 if (errno != EAGAIN) { 588 perror("EAGAIN expected"); 589 exit(EXIT_FAILURE); 590 } 591 592 read_overhead_ns = current_nsec() - read_enter_ns - 593 NSEC_PER_SEC * RCVTIMEO_TIMEOUT_SEC; 594 595 if (read_overhead_ns > READ_OVERHEAD_NSEC) { 596 fprintf(stderr, 597 "too much time in read(2), %lu > %i ns\n", 598 read_overhead_ns, READ_OVERHEAD_NSEC); 599 exit(EXIT_FAILURE); 600 } 601 602 control_writeln("WAITDONE"); 603 close(fd); 604 } 605 606 static void test_seqpacket_timeout_server(const struct test_opts *opts) 607 { 608 int fd; 609 610 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 611 if (fd < 0) { 612 perror("accept"); 613 exit(EXIT_FAILURE); 614 } 615 616 control_expectln("WAITDONE"); 617 close(fd); 618 } 619 620 static void test_seqpacket_bigmsg_client(const struct test_opts *opts) 621 { 622 unsigned long long sock_buf_size; 623 size_t buf_size; 624 socklen_t len; 625 void *data; 626 int fd; 627 628 len = sizeof(sock_buf_size); 629 630 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 631 if (fd < 0) { 632 perror("connect"); 633 exit(EXIT_FAILURE); 634 } 635 636 if (getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, 637 &sock_buf_size, &len)) { 638 perror("getsockopt"); 639 exit(EXIT_FAILURE); 640 } 641 642 sock_buf_size++; 643 644 /* size_t can be < unsigned long long */ 645 buf_size = (size_t)sock_buf_size; 646 if (buf_size != sock_buf_size) { 647 fprintf(stderr, "Returned BUFFER_SIZE too large\n"); 648 exit(EXIT_FAILURE); 649 } 650 651 data = malloc(buf_size); 652 if (!data) { 653 perror("malloc"); 654 exit(EXIT_FAILURE); 655 } 656 657 send_buf(fd, data, buf_size, 0, -EMSGSIZE); 658 659 control_writeln("CLISENT"); 660 661 free(data); 662 close(fd); 663 } 664 665 static void test_seqpacket_bigmsg_server(const struct test_opts *opts) 666 { 667 int fd; 668 669 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 670 if (fd < 0) { 671 perror("accept"); 672 exit(EXIT_FAILURE); 673 } 674 675 control_expectln("CLISENT"); 676 677 close(fd); 678 } 679 680 #define BUF_PATTERN_1 'a' 681 #define BUF_PATTERN_2 'b' 682 683 static void test_seqpacket_invalid_rec_buffer_client(const struct test_opts *opts) 684 { 685 int fd; 686 unsigned char *buf1; 687 unsigned char *buf2; 688 int buf_size = getpagesize() * 3; 689 690 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 691 if (fd < 0) { 692 perror("connect"); 693 exit(EXIT_FAILURE); 694 } 695 696 buf1 = malloc(buf_size); 697 if (!buf1) { 698 perror("'malloc()' for 'buf1'"); 699 exit(EXIT_FAILURE); 700 } 701 702 buf2 = malloc(buf_size); 703 if (!buf2) { 704 perror("'malloc()' for 'buf2'"); 705 exit(EXIT_FAILURE); 706 } 707 708 memset(buf1, BUF_PATTERN_1, buf_size); 709 memset(buf2, BUF_PATTERN_2, buf_size); 710 711 send_buf(fd, buf1, buf_size, 0, buf_size); 712 713 send_buf(fd, buf2, buf_size, 0, buf_size); 714 715 close(fd); 716 } 717 718 static void test_seqpacket_invalid_rec_buffer_server(const struct test_opts *opts) 719 { 720 int fd; 721 unsigned char *broken_buf; 722 
unsigned char *valid_buf; 723 int page_size = getpagesize(); 724 int buf_size = page_size * 3; 725 ssize_t res; 726 int prot = PROT_READ | PROT_WRITE; 727 int flags = MAP_PRIVATE | MAP_ANONYMOUS; 728 int i; 729 730 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 731 if (fd < 0) { 732 perror("accept"); 733 exit(EXIT_FAILURE); 734 } 735 736 /* Setup first buffer. */ 737 broken_buf = mmap(NULL, buf_size, prot, flags, -1, 0); 738 if (broken_buf == MAP_FAILED) { 739 perror("mmap for 'broken_buf'"); 740 exit(EXIT_FAILURE); 741 } 742 743 /* Unmap "hole" in buffer. */ 744 if (munmap(broken_buf + page_size, page_size)) { 745 perror("'broken_buf' setup"); 746 exit(EXIT_FAILURE); 747 } 748 749 valid_buf = mmap(NULL, buf_size, prot, flags, -1, 0); 750 if (valid_buf == MAP_FAILED) { 751 perror("mmap for 'valid_buf'"); 752 exit(EXIT_FAILURE); 753 } 754 755 /* Try to fill buffer with unmapped middle. */ 756 res = read(fd, broken_buf, buf_size); 757 if (res != -1) { 758 fprintf(stderr, 759 "expected 'broken_buf' read(2) failure, got %zi\n", 760 res); 761 exit(EXIT_FAILURE); 762 } 763 764 if (errno != EFAULT) { 765 perror("unexpected errno of 'broken_buf'"); 766 exit(EXIT_FAILURE); 767 } 768 769 /* Try to fill valid buffer. */ 770 res = read(fd, valid_buf, buf_size); 771 if (res < 0) { 772 perror("unexpected 'valid_buf' read(2) failure"); 773 exit(EXIT_FAILURE); 774 } 775 776 if (res != buf_size) { 777 fprintf(stderr, 778 "invalid 'valid_buf' read(2), expected %i, got %zi\n", 779 buf_size, res); 780 exit(EXIT_FAILURE); 781 } 782 783 for (i = 0; i < buf_size; i++) { 784 if (valid_buf[i] != BUF_PATTERN_2) { 785 fprintf(stderr, 786 "invalid pattern for 'valid_buf' at %i, expected %hhX, got %hhX\n", 787 i, BUF_PATTERN_2, valid_buf[i]); 788 exit(EXIT_FAILURE); 789 } 790 } 791 792 /* Unmap buffers. */ 793 munmap(broken_buf, page_size); 794 munmap(broken_buf + page_size * 2, page_size); 795 munmap(valid_buf, buf_size); 796 close(fd); 797 } 798 799 #define RCVLOWAT_BUF_SIZE 128 800 801 static void test_stream_poll_rcvlowat_server(const struct test_opts *opts) 802 { 803 int fd; 804 int i; 805 806 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 807 if (fd < 0) { 808 perror("accept"); 809 exit(EXIT_FAILURE); 810 } 811 812 /* Send 1 byte. */ 813 send_byte(fd, 1, 0); 814 815 control_writeln("SRVSENT"); 816 817 /* Wait until client is ready to receive rest of data. */ 818 control_expectln("CLNSENT"); 819 820 for (i = 0; i < RCVLOWAT_BUF_SIZE - 1; i++) 821 send_byte(fd, 1, 0); 822 823 /* Keep socket in active state. */ 824 control_expectln("POLLDONE"); 825 826 close(fd); 827 } 828 829 static void test_stream_poll_rcvlowat_client(const struct test_opts *opts) 830 { 831 int lowat_val = RCVLOWAT_BUF_SIZE; 832 char buf[RCVLOWAT_BUF_SIZE]; 833 struct pollfd fds; 834 short poll_flags; 835 int fd; 836 837 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 838 if (fd < 0) { 839 perror("connect"); 840 exit(EXIT_FAILURE); 841 } 842 843 setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT, 844 lowat_val, "setsockopt(SO_RCVLOWAT)"); 845 846 control_expectln("SRVSENT"); 847 848 /* At this point, server sent 1 byte. */ 849 fds.fd = fd; 850 poll_flags = POLLIN | POLLRDNORM; 851 fds.events = poll_flags; 852 853 /* Try to wait for 1 sec. */ 854 if (poll(&fds, 1, 1000) < 0) { 855 perror("poll"); 856 exit(EXIT_FAILURE); 857 } 858 859 /* poll() must return nothing. 
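	 * Only a single byte has been sent so far, which is below the
	 * SO_RCVLOWAT threshold of RCVLOWAT_BUF_SIZE (128) bytes set above,
	 * so no POLLIN/POLLRDNORM event is expected yet.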
*/ 860 if (fds.revents) { 861 fprintf(stderr, "Unexpected poll result %hx\n", 862 fds.revents); 863 exit(EXIT_FAILURE); 864 } 865 866 /* Tell server to send rest of data. */ 867 control_writeln("CLNSENT"); 868 869 /* Poll for data. */ 870 if (poll(&fds, 1, 10000) < 0) { 871 perror("poll"); 872 exit(EXIT_FAILURE); 873 } 874 875 /* Only these two bits are expected. */ 876 if (fds.revents != poll_flags) { 877 fprintf(stderr, "Unexpected poll result %hx\n", 878 fds.revents); 879 exit(EXIT_FAILURE); 880 } 881 882 /* Use MSG_DONTWAIT, if call is going to wait, EAGAIN 883 * will be returned. 884 */ 885 recv_buf(fd, buf, sizeof(buf), MSG_DONTWAIT, RCVLOWAT_BUF_SIZE); 886 887 control_writeln("POLLDONE"); 888 889 close(fd); 890 } 891 892 #define INV_BUF_TEST_DATA_LEN 512 893 894 static void test_inv_buf_client(const struct test_opts *opts, bool stream) 895 { 896 unsigned char data[INV_BUF_TEST_DATA_LEN] = {0}; 897 ssize_t expected_ret; 898 int fd; 899 900 if (stream) 901 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 902 else 903 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 904 905 if (fd < 0) { 906 perror("connect"); 907 exit(EXIT_FAILURE); 908 } 909 910 control_expectln("SENDDONE"); 911 912 /* Use invalid buffer here. */ 913 recv_buf(fd, NULL, sizeof(data), 0, -EFAULT); 914 915 if (stream) { 916 /* For SOCK_STREAM we must continue reading. */ 917 expected_ret = sizeof(data); 918 } else { 919 /* For SOCK_SEQPACKET socket's queue must be empty. */ 920 expected_ret = -EAGAIN; 921 } 922 923 recv_buf(fd, data, sizeof(data), MSG_DONTWAIT, expected_ret); 924 925 control_writeln("DONE"); 926 927 close(fd); 928 } 929 930 static void test_inv_buf_server(const struct test_opts *opts, bool stream) 931 { 932 unsigned char data[INV_BUF_TEST_DATA_LEN] = {0}; 933 int fd; 934 935 if (stream) 936 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 937 else 938 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 939 940 if (fd < 0) { 941 perror("accept"); 942 exit(EXIT_FAILURE); 943 } 944 945 send_buf(fd, data, sizeof(data), 0, sizeof(data)); 946 947 control_writeln("SENDDONE"); 948 949 control_expectln("DONE"); 950 951 close(fd); 952 } 953 954 static void test_stream_inv_buf_client(const struct test_opts *opts) 955 { 956 test_inv_buf_client(opts, true); 957 } 958 959 static void test_stream_inv_buf_server(const struct test_opts *opts) 960 { 961 test_inv_buf_server(opts, true); 962 } 963 964 static void test_seqpacket_inv_buf_client(const struct test_opts *opts) 965 { 966 test_inv_buf_client(opts, false); 967 } 968 969 static void test_seqpacket_inv_buf_server(const struct test_opts *opts) 970 { 971 test_inv_buf_server(opts, false); 972 } 973 974 #define HELLO_STR "HELLO" 975 #define WORLD_STR "WORLD" 976 977 static void test_stream_virtio_skb_merge_client(const struct test_opts *opts) 978 { 979 int fd; 980 981 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 982 if (fd < 0) { 983 perror("connect"); 984 exit(EXIT_FAILURE); 985 } 986 987 /* Send first skbuff. */ 988 send_buf(fd, HELLO_STR, strlen(HELLO_STR), 0, strlen(HELLO_STR)); 989 990 control_writeln("SEND0"); 991 /* Peer reads part of first skbuff. */ 992 control_expectln("REPLY0"); 993 994 /* Send second skbuff, it will be appended to the first. */ 995 send_buf(fd, WORLD_STR, strlen(WORLD_STR), 0, strlen(WORLD_STR)); 996 997 control_writeln("SEND1"); 998 /* Peer reads merged skbuff packet. 
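	 * That is, the remaining "LLO" of the first send and the whole
	 * "WORLD" should come back from a single contiguous read on the
	 * server side.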
*/ 999 control_expectln("REPLY1"); 1000 1001 close(fd); 1002 } 1003 1004 static void test_stream_virtio_skb_merge_server(const struct test_opts *opts) 1005 { 1006 size_t read = 0, to_read; 1007 unsigned char buf[64]; 1008 int fd; 1009 1010 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1011 if (fd < 0) { 1012 perror("accept"); 1013 exit(EXIT_FAILURE); 1014 } 1015 1016 control_expectln("SEND0"); 1017 1018 /* Read skbuff partially. */ 1019 to_read = 2; 1020 recv_buf(fd, buf + read, to_read, 0, to_read); 1021 read += to_read; 1022 1023 control_writeln("REPLY0"); 1024 control_expectln("SEND1"); 1025 1026 /* Read the rest of both buffers */ 1027 to_read = strlen(HELLO_STR WORLD_STR) - read; 1028 recv_buf(fd, buf + read, to_read, 0, to_read); 1029 read += to_read; 1030 1031 /* No more bytes should be there */ 1032 to_read = sizeof(buf) - read; 1033 recv_buf(fd, buf + read, to_read, MSG_DONTWAIT, -EAGAIN); 1034 1035 if (memcmp(buf, HELLO_STR WORLD_STR, strlen(HELLO_STR WORLD_STR))) { 1036 fprintf(stderr, "pattern mismatch\n"); 1037 exit(EXIT_FAILURE); 1038 } 1039 1040 control_writeln("REPLY1"); 1041 1042 close(fd); 1043 } 1044 1045 static void test_seqpacket_msg_peek_client(const struct test_opts *opts) 1046 { 1047 return test_msg_peek_client(opts, true); 1048 } 1049 1050 static void test_seqpacket_msg_peek_server(const struct test_opts *opts) 1051 { 1052 return test_msg_peek_server(opts, true); 1053 } 1054 1055 static sig_atomic_t have_sigpipe; 1056 1057 static void sigpipe(int signo) 1058 { 1059 have_sigpipe = 1; 1060 } 1061 1062 #define SEND_SLEEP_USEC (10 * 1000) 1063 1064 static void test_stream_check_sigpipe(int fd) 1065 { 1066 ssize_t res; 1067 1068 have_sigpipe = 0; 1069 1070 /* When the other peer calls shutdown(SHUT_RD), there is a chance that 1071 * the send() call could occur before the message carrying the close 1072 * information arrives over the transport. In such cases, the send() 1073 * might still succeed. To avoid this race, let's retry the send() call 1074 * a few times, ensuring the test is more reliable. 1075 */ 1076 timeout_begin(TIMEOUT); 1077 while(1) { 1078 res = send(fd, "A", 1, 0); 1079 if (res == -1 && errno != EINTR) 1080 break; 1081 1082 /* Sleep a little before trying again to avoid flooding the 1083 * other peer and filling its receive buffer, causing 1084 * false-negative. 
1085 */ 1086 timeout_usleep(SEND_SLEEP_USEC); 1087 timeout_check("send"); 1088 } 1089 timeout_end(); 1090 1091 if (errno != EPIPE) { 1092 fprintf(stderr, "unexpected send(2) errno %d\n", errno); 1093 exit(EXIT_FAILURE); 1094 } 1095 if (!have_sigpipe) { 1096 fprintf(stderr, "SIGPIPE expected\n"); 1097 exit(EXIT_FAILURE); 1098 } 1099 1100 have_sigpipe = 0; 1101 1102 timeout_begin(TIMEOUT); 1103 while(1) { 1104 res = send(fd, "A", 1, MSG_NOSIGNAL); 1105 if (res == -1 && errno != EINTR) 1106 break; 1107 1108 timeout_usleep(SEND_SLEEP_USEC); 1109 timeout_check("send"); 1110 } 1111 timeout_end(); 1112 1113 if (errno != EPIPE) { 1114 fprintf(stderr, "unexpected send(2) errno %d\n", errno); 1115 exit(EXIT_FAILURE); 1116 } 1117 if (have_sigpipe) { 1118 fprintf(stderr, "SIGPIPE not expected\n"); 1119 exit(EXIT_FAILURE); 1120 } 1121 } 1122 1123 static void test_stream_shutwr_client(const struct test_opts *opts) 1124 { 1125 int fd; 1126 1127 struct sigaction act = { 1128 .sa_handler = sigpipe, 1129 }; 1130 1131 sigaction(SIGPIPE, &act, NULL); 1132 1133 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1134 if (fd < 0) { 1135 perror("connect"); 1136 exit(EXIT_FAILURE); 1137 } 1138 1139 if (shutdown(fd, SHUT_WR)) { 1140 perror("shutdown"); 1141 exit(EXIT_FAILURE); 1142 } 1143 1144 test_stream_check_sigpipe(fd); 1145 1146 control_writeln("CLIENTDONE"); 1147 1148 close(fd); 1149 } 1150 1151 static void test_stream_shutwr_server(const struct test_opts *opts) 1152 { 1153 int fd; 1154 1155 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1156 if (fd < 0) { 1157 perror("accept"); 1158 exit(EXIT_FAILURE); 1159 } 1160 1161 control_expectln("CLIENTDONE"); 1162 1163 close(fd); 1164 } 1165 1166 static void test_stream_shutrd_client(const struct test_opts *opts) 1167 { 1168 int fd; 1169 1170 struct sigaction act = { 1171 .sa_handler = sigpipe, 1172 }; 1173 1174 sigaction(SIGPIPE, &act, NULL); 1175 1176 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1177 if (fd < 0) { 1178 perror("connect"); 1179 exit(EXIT_FAILURE); 1180 } 1181 1182 control_expectln("SHUTRDDONE"); 1183 1184 test_stream_check_sigpipe(fd); 1185 1186 control_writeln("CLIENTDONE"); 1187 1188 close(fd); 1189 } 1190 1191 static void test_stream_shutrd_server(const struct test_opts *opts) 1192 { 1193 int fd; 1194 1195 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1196 if (fd < 0) { 1197 perror("accept"); 1198 exit(EXIT_FAILURE); 1199 } 1200 1201 if (shutdown(fd, SHUT_RD)) { 1202 perror("shutdown"); 1203 exit(EXIT_FAILURE); 1204 } 1205 1206 control_writeln("SHUTRDDONE"); 1207 control_expectln("CLIENTDONE"); 1208 1209 close(fd); 1210 } 1211 1212 static void test_double_bind_connect_server(const struct test_opts *opts) 1213 { 1214 int listen_fd, client_fd, i; 1215 struct sockaddr_vm sa_client; 1216 socklen_t socklen_client = sizeof(sa_client); 1217 1218 listen_fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port); 1219 1220 for (i = 0; i < 2; i++) { 1221 control_writeln("LISTENING"); 1222 1223 timeout_begin(TIMEOUT); 1224 do { 1225 client_fd = accept(listen_fd, (struct sockaddr *)&sa_client, 1226 &socklen_client); 1227 timeout_check("accept"); 1228 } while (client_fd < 0 && errno == EINTR); 1229 timeout_end(); 1230 1231 if (client_fd < 0) { 1232 perror("accept"); 1233 exit(EXIT_FAILURE); 1234 } 1235 1236 /* Waiting for remote peer to close connection */ 1237 vsock_wait_remote_close(client_fd); 1238 } 1239 1240 close(listen_fd); 1241 } 1242 1243 static void test_double_bind_connect_client(const 
struct test_opts *opts)
{
	int i, client_fd;

	for (i = 0; i < 2; i++) {
		/* Wait until the server is ready to accept a new connection */
		control_expectln("LISTENING");

		/* We use 'peer_port + 1' as "some" port for the 'bind()'
		 * call. Overflow is harmless here, but it must be kept in
		 * mind when running multiple test applications
		 * simultaneously whose 'peer-port' arguments differ by 1.
		 */
		client_fd = vsock_bind_connect(opts->peer_cid, opts->peer_port,
					       opts->peer_port + 1, SOCK_STREAM);

		close(client_fd);
	}
}

#define MSG_BUF_IOCTL_LEN 64
static void test_unsent_bytes_server(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int client_fd;

	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
	if (client_fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	recv_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
	control_writeln("RECEIVED");

	close(client_fd);
}

static void test_unsent_bytes_client(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int fd;

	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	for (int i = 0; i < sizeof(buf); i++)
		buf[i] = rand() & 0xFF;

	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
	control_expectln("RECEIVED");

	/* SIOCOUTQ isn't guaranteed to instantly track sent data. Even though
	 * the "RECEIVED" message means that the other side has received the
	 * data, there can be a delay in our kernel before updating the "unsent
	 * bytes" counter. vsock_wait_sent() will repeat SIOCOUTQ until it
	 * returns 0.
	 */
	if (!vsock_wait_sent(fd))
		fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");

	close(fd);
}

static void test_stream_unsent_bytes_client(const struct test_opts *opts)
{
	test_unsent_bytes_client(opts, SOCK_STREAM);
}

static void test_stream_unsent_bytes_server(const struct test_opts *opts)
{
	test_unsent_bytes_server(opts, SOCK_STREAM);
}

static void test_seqpacket_unsent_bytes_client(const struct test_opts *opts)
{
	test_unsent_bytes_client(opts, SOCK_SEQPACKET);
}

static void test_seqpacket_unsent_bytes_server(const struct test_opts *opts)
{
	test_unsent_bytes_server(opts, SOCK_SEQPACKET);
}

#define RCVLOWAT_CREDIT_UPD_BUF_SIZE (1024 * 128)
/* This define is the same as in 'include/linux/virtio_vsock.h':
 * it is used to decide when to send a credit update message while
 * reading from a socket's rx queue. The value and its usage in the
 * kernel are important for this test.
 */
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)

static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opts)
{
	size_t buf_size;
	void *buf;
	int fd;

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	/* Send 1 byte more than the peer's buffer size. */
	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE + 1;

	buf = malloc(buf_size);
	if (!buf) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	/* Wait until the peer sets the needed buffer size.
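	 * The peer signals this by sending the single byte read below; it
	 * does so only after configuring SO_VM_SOCKETS_BUFFER_SIZE (and,
	 * in one test variant, SO_RCVLOWAT), so that byte also serializes
	 * the start of our transmission with those updates.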
 */
	recv_byte(fd, 1, 0);

	if (send(fd, buf, buf_size, 0) != buf_size) {
		perror("send failed");
		exit(EXIT_FAILURE);
	}

	free(buf);
	close(fd);
}

static void test_stream_credit_update_test(const struct test_opts *opts,
					   bool low_rx_bytes_test)
{
	int recv_buf_size;
	struct pollfd fds;
	size_t buf_size;
	unsigned long long sock_buf_size;
	void *buf;
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE;

	/* size_t can be < unsigned long long */
	sock_buf_size = buf_size;

	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");

	if (low_rx_bytes_test) {
		/* Set a new SO_RCVLOWAT here. This enables sending a credit
		 * update when the number of bytes in our rx queue becomes
		 * less than the SO_RCVLOWAT value.
		 */
		recv_buf_size = 1 + VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
	}

	/* Send one dummy byte here, because the 'setsockopt()' above also
	 * sends a special packet which tells the sender to update our buffer
	 * size. This 'send_byte()' serializes that packet with the data
	 * reads in the loop below. The sender starts transmission only when
	 * it receives this single byte.
	 */
	send_byte(fd, 1, 0);

	buf = malloc(buf_size);
	if (!buf) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	/* Wait until there is 128KB of data in the rx queue. */
	while (1) {
		ssize_t res;

		res = recv(fd, buf, buf_size, MSG_PEEK);
		if (res == buf_size)
			break;

		if (res <= 0) {
			fprintf(stderr, "unexpected 'recv()' return: %zi\n", res);
			exit(EXIT_FAILURE);
		}
	}

	/* There is 128KB of data in the socket's rx queue; dequeue the first
	 * 64KB. A credit update is sent if 'low_rx_bytes_test' == true.
	 * Otherwise, the credit update is sent in the
	 * 'if (!low_rx_bytes_test)' branch below.
	 */
	recv_buf_size = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
	recv_buf(fd, buf, recv_buf_size, 0, recv_buf_size);

	if (!low_rx_bytes_test) {
		recv_buf_size++;

		/* Updating SO_RCVLOWAT will send a credit update. */
		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
	}

	fds.fd = fd;
	fds.events = POLLIN | POLLRDNORM | POLLERR |
		     POLLRDHUP | POLLHUP;

	/* This 'poll()' will return once we receive the last byte
	 * sent by the client.
	 */
	if (poll(&fds, 1, -1) < 0) {
		perror("poll");
		exit(EXIT_FAILURE);
	}

	if (fds.revents & POLLERR) {
		fprintf(stderr, "'poll()' error\n");
		exit(EXIT_FAILURE);
	}

	if (fds.revents & (POLLIN | POLLRDNORM)) {
		recv_buf(fd, buf, recv_buf_size, MSG_DONTWAIT, recv_buf_size);
	} else {
		/* These flags must be set, as there is at
		 * least 64KB of data ready to read.
1472 */ 1473 fprintf(stderr, "POLLIN | POLLRDNORM expected\n"); 1474 exit(EXIT_FAILURE); 1475 } 1476 1477 free(buf); 1478 close(fd); 1479 } 1480 1481 static void test_stream_cred_upd_on_low_rx_bytes(const struct test_opts *opts) 1482 { 1483 test_stream_credit_update_test(opts, true); 1484 } 1485 1486 static void test_stream_cred_upd_on_set_rcvlowat(const struct test_opts *opts) 1487 { 1488 test_stream_credit_update_test(opts, false); 1489 } 1490 1491 /* The goal of test leak_acceptq is to stress the race between connect() and 1492 * close(listener). Implementation of client/server loops boils down to: 1493 * 1494 * client server 1495 * ------ ------ 1496 * write(CONTINUE) 1497 * expect(CONTINUE) 1498 * listen() 1499 * write(LISTENING) 1500 * expect(LISTENING) 1501 * connect() close() 1502 */ 1503 #define ACCEPTQ_LEAK_RACE_TIMEOUT 2 /* seconds */ 1504 1505 static void test_stream_leak_acceptq_client(const struct test_opts *opts) 1506 { 1507 time_t tout; 1508 int fd; 1509 1510 tout = current_nsec() + ACCEPTQ_LEAK_RACE_TIMEOUT * NSEC_PER_SEC; 1511 do { 1512 control_writeulong(CONTROL_CONTINUE); 1513 1514 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1515 if (fd >= 0) 1516 close(fd); 1517 } while (current_nsec() < tout); 1518 1519 control_writeulong(CONTROL_DONE); 1520 } 1521 1522 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */ 1523 static void test_stream_leak_acceptq_server(const struct test_opts *opts) 1524 { 1525 int fd; 1526 1527 while (control_readulong() == CONTROL_CONTINUE) { 1528 fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port); 1529 control_writeln("LISTENING"); 1530 close(fd); 1531 } 1532 } 1533 1534 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */ 1535 static void test_stream_msgzcopy_leak_errq_client(const struct test_opts *opts) 1536 { 1537 struct pollfd fds = { 0 }; 1538 int fd; 1539 1540 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1541 if (fd < 0) { 1542 perror("connect"); 1543 exit(EXIT_FAILURE); 1544 } 1545 1546 enable_so_zerocopy_check(fd); 1547 send_byte(fd, 1, MSG_ZEROCOPY); 1548 1549 fds.fd = fd; 1550 fds.events = 0; 1551 if (poll(&fds, 1, -1) < 0) { 1552 perror("poll"); 1553 exit(EXIT_FAILURE); 1554 } 1555 1556 close(fd); 1557 } 1558 1559 static void test_stream_msgzcopy_leak_errq_server(const struct test_opts *opts) 1560 { 1561 int fd; 1562 1563 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1564 if (fd < 0) { 1565 perror("accept"); 1566 exit(EXIT_FAILURE); 1567 } 1568 1569 recv_byte(fd, 1, 0); 1570 vsock_wait_remote_close(fd); 1571 close(fd); 1572 } 1573 1574 /* Test msgzcopy_leak_zcskb is meant to exercise sendmsg() error handling path, 1575 * that might leak an skb. The idea is to fail virtio_transport_init_zcopy_skb() 1576 * by hitting net.core.optmem_max limit in sock_omalloc(), specifically 1577 * 1578 * vsock_connectible_sendmsg 1579 * virtio_transport_stream_enqueue 1580 * virtio_transport_send_pkt_info 1581 * virtio_transport_init_zcopy_skb 1582 * . msg_zerocopy_realloc 1583 * . msg_zerocopy_alloc 1584 * . sock_omalloc 1585 * . sk_omem_alloc + size > sysctl_optmem_max 1586 * return -ENOMEM 1587 * 1588 * We abuse the implementation detail of net/socket.c:____sys_sendmsg(). 1589 * sk_omem_alloc can be precisely bumped by sock_kmalloc(), as it is used to 1590 * fetch user-provided control data. 
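 *
 * Roughly: the client below passes msg_controllen = optmem_max - 1, so the
 * sock_kmalloc() for the control data consumes almost the whole per-socket
 * optmem budget, and the zerocopy completion skb allocation that follows
 * fails with -ENOMEM (the sendmsg() is expected to return that error).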
1591 * 1592 * While this approach works for now, it relies on assumptions regarding the 1593 * implementation and configuration (for example, order of net.core.optmem_max 1594 * can not exceed MAX_PAGE_ORDER), which may not hold in the future. A more 1595 * resilient testing could be implemented by leveraging the Fault injection 1596 * framework (CONFIG_FAULT_INJECTION), e.g. 1597 * 1598 * client# echo N > /sys/kernel/debug/failslab/ignore-gfp-wait 1599 * client# echo 0 > /sys/kernel/debug/failslab/verbose 1600 * 1601 * void client(const struct test_opts *opts) 1602 * { 1603 * char buf[16]; 1604 * int f, s, i; 1605 * 1606 * f = open("/proc/self/fail-nth", O_WRONLY); 1607 * 1608 * for (i = 1; i < 32; i++) { 1609 * control_writeulong(CONTROL_CONTINUE); 1610 * 1611 * s = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1612 * enable_so_zerocopy_check(s); 1613 * 1614 * sprintf(buf, "%d", i); 1615 * write(f, buf, strlen(buf)); 1616 * 1617 * send(s, &(char){ 0 }, 1, MSG_ZEROCOPY); 1618 * 1619 * write(f, "0", 1); 1620 * close(s); 1621 * } 1622 * 1623 * control_writeulong(CONTROL_DONE); 1624 * close(f); 1625 * } 1626 * 1627 * void server(const struct test_opts *opts) 1628 * { 1629 * int fd; 1630 * 1631 * while (control_readulong() == CONTROL_CONTINUE) { 1632 * fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1633 * vsock_wait_remote_close(fd); 1634 * close(fd); 1635 * } 1636 * } 1637 * 1638 * Refer to Documentation/fault-injection/fault-injection.rst. 1639 */ 1640 #define MAX_PAGE_ORDER 10 /* usually */ 1641 #define PAGE_SIZE 4096 1642 1643 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */ 1644 static void test_stream_msgzcopy_leak_zcskb_client(const struct test_opts *opts) 1645 { 1646 size_t optmem_max, ctl_len, chunk_size; 1647 struct msghdr msg = { 0 }; 1648 struct iovec iov; 1649 char *chunk; 1650 int fd, res; 1651 FILE *f; 1652 1653 f = fopen("/proc/sys/net/core/optmem_max", "r"); 1654 if (!f) { 1655 perror("fopen(optmem_max)"); 1656 exit(EXIT_FAILURE); 1657 } 1658 1659 if (fscanf(f, "%zu", &optmem_max) != 1) { 1660 fprintf(stderr, "fscanf(optmem_max) failed\n"); 1661 exit(EXIT_FAILURE); 1662 } 1663 1664 fclose(f); 1665 1666 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1667 if (fd < 0) { 1668 perror("connect"); 1669 exit(EXIT_FAILURE); 1670 } 1671 1672 enable_so_zerocopy_check(fd); 1673 1674 ctl_len = optmem_max - 1; 1675 if (ctl_len > PAGE_SIZE << MAX_PAGE_ORDER) { 1676 fprintf(stderr, "Try with net.core.optmem_max = 100000\n"); 1677 exit(EXIT_FAILURE); 1678 } 1679 1680 chunk_size = CMSG_SPACE(ctl_len); 1681 chunk = malloc(chunk_size); 1682 if (!chunk) { 1683 perror("malloc"); 1684 exit(EXIT_FAILURE); 1685 } 1686 memset(chunk, 0, chunk_size); 1687 1688 iov.iov_base = &(char){ 0 }; 1689 iov.iov_len = 1; 1690 1691 msg.msg_iov = &iov; 1692 msg.msg_iovlen = 1; 1693 msg.msg_control = chunk; 1694 msg.msg_controllen = ctl_len; 1695 1696 errno = 0; 1697 res = sendmsg(fd, &msg, MSG_ZEROCOPY); 1698 if (res >= 0 || errno != ENOMEM) { 1699 fprintf(stderr, "Expected ENOMEM, got errno=%d res=%d\n", 1700 errno, res); 1701 exit(EXIT_FAILURE); 1702 } 1703 1704 close(fd); 1705 } 1706 1707 static void test_stream_msgzcopy_leak_zcskb_server(const struct test_opts *opts) 1708 { 1709 int fd; 1710 1711 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1712 if (fd < 0) { 1713 perror("accept"); 1714 exit(EXIT_FAILURE); 1715 } 1716 1717 vsock_wait_remote_close(fd); 1718 close(fd); 1719 } 1720 1721 #define MAX_PORT_RETRIES 24 /* 
net/vmw_vsock/af_vsock.c */ 1722 1723 static bool test_stream_transport_uaf(int cid) 1724 { 1725 int sockets[MAX_PORT_RETRIES]; 1726 struct sockaddr_vm addr; 1727 socklen_t alen; 1728 int fd, i, c; 1729 bool ret; 1730 1731 /* Probe for a transport by attempting a local CID bind. Unavailable 1732 * transport (or more specifically: an unsupported transport/CID 1733 * combination) results in EADDRNOTAVAIL, other errnos are fatal. 1734 */ 1735 fd = vsock_bind_try(cid, VMADDR_PORT_ANY, SOCK_STREAM); 1736 if (fd < 0) { 1737 if (errno != EADDRNOTAVAIL) { 1738 perror("Unexpected bind() errno"); 1739 exit(EXIT_FAILURE); 1740 } 1741 1742 return false; 1743 } 1744 1745 alen = sizeof(addr); 1746 if (getsockname(fd, (struct sockaddr *)&addr, &alen)) { 1747 perror("getsockname"); 1748 exit(EXIT_FAILURE); 1749 } 1750 1751 /* Drain the autobind pool; see __vsock_bind_connectible(). */ 1752 for (i = 0; i < MAX_PORT_RETRIES; ++i) 1753 sockets[i] = vsock_bind(cid, ++addr.svm_port, SOCK_STREAM); 1754 1755 close(fd); 1756 1757 /* Setting SOCK_NONBLOCK makes connect() return soon after 1758 * (re-)assigning the transport. We are not connecting to anything 1759 * anyway, so there is no point entering the main loop in 1760 * vsock_connect(); waiting for timeout, checking for signals, etc. 1761 */ 1762 fd = socket(AF_VSOCK, SOCK_STREAM | SOCK_NONBLOCK, 0); 1763 if (fd < 0) { 1764 perror("socket"); 1765 exit(EXIT_FAILURE); 1766 } 1767 1768 /* Assign transport, while failing to autobind. Autobind pool was 1769 * drained, so EADDRNOTAVAIL coming from __vsock_bind_connectible() is 1770 * expected. 1771 * 1772 * One exception is ENODEV which is thrown by vsock_assign_transport(), 1773 * i.e. before vsock_auto_bind(), when the only transport loaded is 1774 * vhost. 1775 */ 1776 if (!connect(fd, (struct sockaddr *)&addr, alen)) { 1777 fprintf(stderr, "Unexpected connect() success\n"); 1778 exit(EXIT_FAILURE); 1779 } 1780 if (errno == ENODEV && cid == VMADDR_CID_HOST) { 1781 ret = false; 1782 goto cleanup; 1783 } 1784 if (errno != EADDRNOTAVAIL) { 1785 perror("Unexpected connect() errno"); 1786 exit(EXIT_FAILURE); 1787 } 1788 1789 /* Reassign transport, triggering old transport release and 1790 * (potentially) unbinding of an unbound socket. 1791 * 1792 * Vulnerable system may crash now. 1793 */ 1794 for (c = VMADDR_CID_HYPERVISOR; c <= VMADDR_CID_HOST + 1; ++c) { 1795 if (c != cid) { 1796 addr.svm_cid = c; 1797 (void)connect(fd, (struct sockaddr *)&addr, alen); 1798 } 1799 } 1800 1801 ret = true; 1802 cleanup: 1803 close(fd); 1804 while (i--) 1805 close(sockets[i]); 1806 1807 return ret; 1808 } 1809 1810 /* Test attempts to trigger a transport release for an unbound socket. This can 1811 * lead to a reference count mishandling. 
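 *
 * In short, test_stream_transport_uaf() drains the autobind pool so that
 * connect() assigns a transport but fails to autobind, then connects the
 * still-unbound socket to other CIDs; each reassignment releases the
 * previous transport, which is where a vulnerable kernel may crash.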
1812 */ 1813 static void test_stream_transport_uaf_client(const struct test_opts *opts) 1814 { 1815 bool tested = false; 1816 int cid, tr; 1817 1818 for (cid = VMADDR_CID_HYPERVISOR; cid <= VMADDR_CID_HOST + 1; ++cid) 1819 tested |= test_stream_transport_uaf(cid); 1820 1821 tr = get_transports(); 1822 if (!tr) 1823 fprintf(stderr, "No transports detected\n"); 1824 else if (tr == TRANSPORT_VIRTIO) 1825 fprintf(stderr, "Setup unsupported: sole virtio transport\n"); 1826 else if (!tested) 1827 fprintf(stderr, "No transports tested\n"); 1828 } 1829 1830 static void test_stream_connect_retry_client(const struct test_opts *opts) 1831 { 1832 int fd; 1833 1834 fd = socket(AF_VSOCK, SOCK_STREAM, 0); 1835 if (fd < 0) { 1836 perror("socket"); 1837 exit(EXIT_FAILURE); 1838 } 1839 1840 if (!vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) { 1841 fprintf(stderr, "Unexpected connect() #1 success\n"); 1842 exit(EXIT_FAILURE); 1843 } 1844 1845 control_writeln("LISTEN"); 1846 control_expectln("LISTENING"); 1847 1848 if (vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) { 1849 perror("connect() #2"); 1850 exit(EXIT_FAILURE); 1851 } 1852 1853 close(fd); 1854 } 1855 1856 static void test_stream_connect_retry_server(const struct test_opts *opts) 1857 { 1858 int fd; 1859 1860 control_expectln("LISTEN"); 1861 1862 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1863 if (fd < 0) { 1864 perror("accept"); 1865 exit(EXIT_FAILURE); 1866 } 1867 1868 vsock_wait_remote_close(fd); 1869 close(fd); 1870 } 1871 1872 #define TRANSPORT_CHANGE_TIMEOUT 2 /* seconds */ 1873 1874 static void *test_stream_transport_change_thread(void *vargp) 1875 { 1876 pid_t *pid = (pid_t *)vargp; 1877 int ret; 1878 1879 ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); 1880 if (ret) { 1881 fprintf(stderr, "pthread_setcanceltype: %d\n", ret); 1882 exit(EXIT_FAILURE); 1883 } 1884 1885 while (true) { 1886 if (kill(*pid, SIGUSR1) < 0) { 1887 perror("kill"); 1888 exit(EXIT_FAILURE); 1889 } 1890 } 1891 return NULL; 1892 } 1893 1894 static void test_transport_change_signal_handler(int signal) 1895 { 1896 /* We need a custom handler for SIGUSR1 as the default one terminates the process. */ 1897 } 1898 1899 static void test_stream_transport_change_client(const struct test_opts *opts) 1900 { 1901 __sighandler_t old_handler; 1902 pid_t pid = getpid(); 1903 pthread_t thread_id; 1904 time_t tout; 1905 int ret, tr; 1906 1907 tr = get_transports(); 1908 1909 /* Print a warning if there is a G2H transport loaded. 1910 * This is on a best effort basis because VMCI can be either G2H and H2G, and there is 1911 * no easy way to understand it. 1912 * The bug we are testing only appears when G2H transports are not loaded. 1913 * This is because `vsock_assign_transport`, when using CID 0, assigns a G2H transport 1914 * to vsk->transport. If none is available it is set to NULL, causing the null-ptr-deref. 1915 */ 1916 if (tr & TRANSPORTS_G2H) 1917 fprintf(stderr, "G2H Transport detected. 
This test will not fail.\n"); 1918 1919 old_handler = signal(SIGUSR1, test_transport_change_signal_handler); 1920 if (old_handler == SIG_ERR) { 1921 perror("signal"); 1922 exit(EXIT_FAILURE); 1923 } 1924 1925 ret = pthread_create(&thread_id, NULL, test_stream_transport_change_thread, &pid); 1926 if (ret) { 1927 fprintf(stderr, "pthread_create: %d\n", ret); 1928 exit(EXIT_FAILURE); 1929 } 1930 1931 control_expectln("LISTENING"); 1932 1933 tout = current_nsec() + TRANSPORT_CHANGE_TIMEOUT * NSEC_PER_SEC; 1934 do { 1935 struct sockaddr_vm sa = { 1936 .svm_family = AF_VSOCK, 1937 .svm_cid = opts->peer_cid, 1938 .svm_port = opts->peer_port, 1939 }; 1940 int s; 1941 1942 s = socket(AF_VSOCK, SOCK_STREAM, 0); 1943 if (s < 0) { 1944 perror("socket"); 1945 exit(EXIT_FAILURE); 1946 } 1947 1948 ret = connect(s, (struct sockaddr *)&sa, sizeof(sa)); 1949 /* The connect can fail due to signals coming from the thread, 1950 * or because the receiver connection queue is full. 1951 * Ignoring also the latter case because there is no way 1952 * of synchronizing client's connect and server's accept when 1953 * connect(s) are constantly being interrupted by signals. 1954 */ 1955 if (ret == -1 && (errno != EINTR && errno != ECONNRESET)) { 1956 perror("connect"); 1957 exit(EXIT_FAILURE); 1958 } 1959 1960 /* Set CID to 0 cause a transport change. */ 1961 sa.svm_cid = 0; 1962 1963 /* Ignore return value since it can fail or not. 1964 * If the previous connect is interrupted while the 1965 * connection request is already sent, the second 1966 * connect() will wait for the response. 1967 */ 1968 connect(s, (struct sockaddr *)&sa, sizeof(sa)); 1969 1970 close(s); 1971 1972 control_writeulong(CONTROL_CONTINUE); 1973 1974 } while (current_nsec() < tout); 1975 1976 control_writeulong(CONTROL_DONE); 1977 1978 ret = pthread_cancel(thread_id); 1979 if (ret) { 1980 fprintf(stderr, "pthread_cancel: %d\n", ret); 1981 exit(EXIT_FAILURE); 1982 } 1983 1984 ret = pthread_join(thread_id, NULL); 1985 if (ret) { 1986 fprintf(stderr, "pthread_join: %d\n", ret); 1987 exit(EXIT_FAILURE); 1988 } 1989 1990 if (signal(SIGUSR1, old_handler) == SIG_ERR) { 1991 perror("signal"); 1992 exit(EXIT_FAILURE); 1993 } 1994 } 1995 1996 static void test_stream_transport_change_server(const struct test_opts *opts) 1997 { 1998 int s = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port); 1999 2000 /* Set the socket to be nonblocking because connects that have been interrupted 2001 * (EINTR) can fill the receiver's accept queue anyway, leading to connect failure. 2002 * As of today (6.15) in such situation there is no way to understand, from the 2003 * client side, if the connection has been queued in the server or not. 2004 */ 2005 if (fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK) < 0) { 2006 perror("fcntl"); 2007 exit(EXIT_FAILURE); 2008 } 2009 control_writeln("LISTENING"); 2010 2011 while (control_readulong() == CONTROL_CONTINUE) { 2012 /* Must accept the connection, otherwise the `listen` 2013 * queue will fill up and new connections will fail. 2014 * There can be more than one queued connection, 2015 * clear them all. 
2016 */ 2017 while (true) { 2018 int client = accept(s, NULL, NULL); 2019 2020 if (client < 0) { 2021 if (errno == EAGAIN) 2022 break; 2023 2024 perror("accept"); 2025 exit(EXIT_FAILURE); 2026 } 2027 2028 close(client); 2029 } 2030 } 2031 2032 close(s); 2033 } 2034 2035 static void test_stream_linger_client(const struct test_opts *opts) 2036 { 2037 int fd; 2038 2039 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 2040 if (fd < 0) { 2041 perror("connect"); 2042 exit(EXIT_FAILURE); 2043 } 2044 2045 enable_so_linger(fd, 1); 2046 close(fd); 2047 } 2048 2049 static void test_stream_linger_server(const struct test_opts *opts) 2050 { 2051 int fd; 2052 2053 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 2054 if (fd < 0) { 2055 perror("accept"); 2056 exit(EXIT_FAILURE); 2057 } 2058 2059 vsock_wait_remote_close(fd); 2060 close(fd); 2061 } 2062 2063 /* Half of the default to not risk timing out the control channel */ 2064 #define LINGER_TIMEOUT (TIMEOUT / 2) 2065 2066 static void test_stream_nolinger_client(const struct test_opts *opts) 2067 { 2068 bool waited; 2069 time_t ns; 2070 int fd; 2071 2072 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 2073 if (fd < 0) { 2074 perror("connect"); 2075 exit(EXIT_FAILURE); 2076 } 2077 2078 enable_so_linger(fd, LINGER_TIMEOUT); 2079 send_byte(fd, 1, 0); /* Left unread to expose incorrect behaviour. */ 2080 waited = vsock_wait_sent(fd); 2081 2082 ns = current_nsec(); 2083 close(fd); 2084 ns = current_nsec() - ns; 2085 2086 if (!waited) { 2087 fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n"); 2088 } else if (DIV_ROUND_UP(ns, NSEC_PER_SEC) >= LINGER_TIMEOUT) { 2089 fprintf(stderr, "Unexpected lingering\n"); 2090 exit(EXIT_FAILURE); 2091 } 2092 2093 control_writeln("DONE"); 2094 } 2095 2096 static void test_stream_nolinger_server(const struct test_opts *opts) 2097 { 2098 int fd; 2099 2100 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 2101 if (fd < 0) { 2102 perror("accept"); 2103 exit(EXIT_FAILURE); 2104 } 2105 2106 control_expectln("DONE"); 2107 close(fd); 2108 } 2109 2110 static struct test_case test_cases[] = { 2111 { 2112 .name = "SOCK_STREAM connection reset", 2113 .run_client = test_stream_connection_reset, 2114 }, 2115 { 2116 .name = "SOCK_STREAM bind only", 2117 .run_client = test_stream_bind_only_client, 2118 .run_server = test_stream_bind_only_server, 2119 }, 2120 { 2121 .name = "SOCK_STREAM client close", 2122 .run_client = test_stream_client_close_client, 2123 .run_server = test_stream_client_close_server, 2124 }, 2125 { 2126 .name = "SOCK_STREAM server close", 2127 .run_client = test_stream_server_close_client, 2128 .run_server = test_stream_server_close_server, 2129 }, 2130 { 2131 .name = "SOCK_STREAM multiple connections", 2132 .run_client = test_stream_multiconn_client, 2133 .run_server = test_stream_multiconn_server, 2134 }, 2135 { 2136 .name = "SOCK_STREAM MSG_PEEK", 2137 .run_client = test_stream_msg_peek_client, 2138 .run_server = test_stream_msg_peek_server, 2139 }, 2140 { 2141 .name = "SOCK_SEQPACKET msg bounds", 2142 .run_client = test_seqpacket_msg_bounds_client, 2143 .run_server = test_seqpacket_msg_bounds_server, 2144 }, 2145 { 2146 .name = "SOCK_SEQPACKET MSG_TRUNC flag", 2147 .run_client = test_seqpacket_msg_trunc_client, 2148 .run_server = test_seqpacket_msg_trunc_server, 2149 }, 2150 { 2151 .name = "SOCK_SEQPACKET timeout", 2152 .run_client = test_seqpacket_timeout_client, 2153 .run_server = test_seqpacket_timeout_server, 2154 }, 2155 { 2156 .name = "SOCK_SEQPACKET 
		.run_client = test_seqpacket_invalid_rec_buffer_client,
		.run_server = test_seqpacket_invalid_rec_buffer_server,
	},
	{
		.name = "SOCK_STREAM poll() + SO_RCVLOWAT",
		.run_client = test_stream_poll_rcvlowat_client,
		.run_server = test_stream_poll_rcvlowat_server,
	},
	{
		.name = "SOCK_SEQPACKET big message",
		.run_client = test_seqpacket_bigmsg_client,
		.run_server = test_seqpacket_bigmsg_server,
	},
	{
		.name = "SOCK_STREAM test invalid buffer",
		.run_client = test_stream_inv_buf_client,
		.run_server = test_stream_inv_buf_server,
	},
	{
		.name = "SOCK_SEQPACKET test invalid buffer",
		.run_client = test_seqpacket_inv_buf_client,
		.run_server = test_seqpacket_inv_buf_server,
	},
	{
		.name = "SOCK_STREAM virtio skb merge",
		.run_client = test_stream_virtio_skb_merge_client,
		.run_server = test_stream_virtio_skb_merge_server,
	},
	{
		.name = "SOCK_SEQPACKET MSG_PEEK",
		.run_client = test_seqpacket_msg_peek_client,
		.run_server = test_seqpacket_msg_peek_server,
	},
	{
		.name = "SOCK_STREAM SHUT_WR",
		.run_client = test_stream_shutwr_client,
		.run_server = test_stream_shutwr_server,
	},
	{
		.name = "SOCK_STREAM SHUT_RD",
		.run_client = test_stream_shutrd_client,
		.run_server = test_stream_shutrd_server,
	},
	{
		.name = "SOCK_STREAM MSG_ZEROCOPY",
		.run_client = test_stream_msgzcopy_client,
		.run_server = test_stream_msgzcopy_server,
	},
	{
		.name = "SOCK_SEQPACKET MSG_ZEROCOPY",
		.run_client = test_seqpacket_msgzcopy_client,
		.run_server = test_seqpacket_msgzcopy_server,
	},
	{
		.name = "SOCK_STREAM MSG_ZEROCOPY empty MSG_ERRQUEUE",
		.run_client = test_stream_msgzcopy_empty_errq_client,
		.run_server = test_stream_msgzcopy_empty_errq_server,
	},
	{
		.name = "SOCK_STREAM double bind connect",
		.run_client = test_double_bind_connect_client,
		.run_server = test_double_bind_connect_server,
	},
	{
		.name = "SOCK_STREAM virtio credit update + SO_RCVLOWAT",
		.run_client = test_stream_rcvlowat_def_cred_upd_client,
		.run_server = test_stream_cred_upd_on_set_rcvlowat,
	},
	{
		.name = "SOCK_STREAM virtio credit update + low rx_bytes",
		.run_client = test_stream_rcvlowat_def_cred_upd_client,
		.run_server = test_stream_cred_upd_on_low_rx_bytes,
	},
	{
		.name = "SOCK_STREAM ioctl(SIOCOUTQ) 0 unsent bytes",
		.run_client = test_stream_unsent_bytes_client,
		.run_server = test_stream_unsent_bytes_server,
	},
	{
		.name = "SOCK_SEQPACKET ioctl(SIOCOUTQ) 0 unsent bytes",
		.run_client = test_seqpacket_unsent_bytes_client,
		.run_server = test_seqpacket_unsent_bytes_server,
	},
	{
		.name = "SOCK_STREAM leak accept queue",
		.run_client = test_stream_leak_acceptq_client,
		.run_server = test_stream_leak_acceptq_server,
	},
	{
		.name = "SOCK_STREAM MSG_ZEROCOPY leak MSG_ERRQUEUE",
		.run_client = test_stream_msgzcopy_leak_errq_client,
		.run_server = test_stream_msgzcopy_leak_errq_server,
	},
	{
		.name = "SOCK_STREAM MSG_ZEROCOPY leak completion skb",
		.run_client = test_stream_msgzcopy_leak_zcskb_client,
		.run_server = test_stream_msgzcopy_leak_zcskb_server,
	},
	{
		.name = "SOCK_STREAM transport release use-after-free",
		.run_client = test_stream_transport_uaf_client,
	},
	{
		.name = "SOCK_STREAM retry failed connect()",
		.run_client = test_stream_connect_retry_client,
		.run_server = test_stream_connect_retry_server,
	},
	{
		.name = "SOCK_STREAM SO_LINGER null-ptr-deref",
		.run_client = test_stream_linger_client,
		.run_server = test_stream_linger_server,
	},
	{
		.name = "SOCK_STREAM SO_LINGER close() on unread",
		.run_client = test_stream_nolinger_client,
		.run_server = test_stream_nolinger_server,
	},
	{
		.name = "SOCK_STREAM transport change null-ptr-deref",
		.run_client = test_stream_transport_change_client,
		.run_server = test_stream_transport_change_server,
	},
	{},
};

static const char optstring[] = "";
static const struct option longopts[] = {
	{
		.name = "control-host",
		.has_arg = required_argument,
		.val = 'H',
	},
	{
		.name = "control-port",
		.has_arg = required_argument,
		.val = 'P',
	},
	{
		.name = "mode",
		.has_arg = required_argument,
		.val = 'm',
	},
	{
		.name = "peer-cid",
		.has_arg = required_argument,
		.val = 'p',
	},
	{
		.name = "peer-port",
		.has_arg = required_argument,
		.val = 'q',
	},
	{
		.name = "list",
		.has_arg = no_argument,
		.val = 'l',
	},
	{
		.name = "skip",
		.has_arg = required_argument,
		.val = 's',
	},
	{
		.name = "pick",
		.has_arg = required_argument,
		.val = 't',
	},
	{
		.name = "help",
		.has_arg = no_argument,
		.val = '?',
	},
	{},
};

static void usage(void)
{
	fprintf(stderr, "Usage: vsock_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--peer-port=<port>] [--list] [--pick=<test_id>] [--skip=<test_id>]\n"
		"\n"
		"  Server: vsock_test --control-port=1234 --mode=server --peer-cid=3\n"
		"  Client: vsock_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n"
		"\n"
		"Run vsock.ko tests. The program must be launched on both\n"
		"the guest and the host. One side must use --mode=client and\n"
		"the other side must use --mode=server.\n"
		"\n"
		"A TCP control socket connection is used to coordinate tests\n"
		"between the client and the server. The server requires a\n"
		"listen address and the client requires an address to\n"
		"connect to.\n"
		"\n"
		"The CID of the other side must be given with --peer-cid=<cid>.\n"
		"During the test, two AF_VSOCK ports will be used: the port\n"
		"specified with --peer-port=<port> (or the default port)\n"
		"and the next one.\n"
		"\n"
		"Options:\n"
		"  --help                 This help message\n"
		"  --control-host <host>  Server IP address to connect to\n"
		"  --control-port <port>  Server port to listen on/connect to\n"
		"  --mode client|server   Server or client mode\n"
		"  --peer-cid <cid>       CID of the other side\n"
		"  --peer-port <port>     AF_VSOCK port used for the test [default: %d]\n"
		"  --list                 List of tests that will be executed\n"
		"  --pick <test_id>       Test ID to execute selectively;\n"
		"                         use multiple --pick options to select more tests\n"
		"  --skip <test_id>       Test ID to skip;\n"
		"                         use multiple --skip options to skip more tests\n",
		DEFAULT_PEER_PORT
	);
	exit(EXIT_FAILURE);
}

int main(int argc, char **argv)
{
	const char *control_host = NULL;
	const char *control_port = NULL;
	struct test_opts opts = {
		.mode = TEST_MODE_UNSET,
		.peer_cid = VMADDR_CID_ANY,
		.peer_port = DEFAULT_PEER_PORT,
	};

	srand(time(NULL));
	init_signals();

	for (;;) {
		int opt = getopt_long(argc, argv, optstring, longopts, NULL);

		if (opt == -1)
			break;

		switch (opt) {
		case 'H':
			control_host = optarg;
			break;
		case 'm':
			if (strcmp(optarg, "client") == 0)
				opts.mode = TEST_MODE_CLIENT;
			else if (strcmp(optarg, "server") == 0)
				opts.mode = TEST_MODE_SERVER;
			else {
				fprintf(stderr, "--mode must be \"client\" or \"server\"\n");
				return EXIT_FAILURE;
			}
			break;
		case 'p':
			opts.peer_cid = parse_cid(optarg);
			break;
		case 'q':
			opts.peer_port = parse_port(optarg);
			break;
		case 'P':
			control_port = optarg;
			break;
		case 'l':
			list_tests(test_cases);
			break;
		case 's':
			skip_test(test_cases, ARRAY_SIZE(test_cases) - 1,
				  optarg);
			break;
		case 't':
			pick_test(test_cases, ARRAY_SIZE(test_cases) - 1,
				  optarg);
			break;
		case '?':
		default:
			usage();
		}
	}

	if (!control_port)
		usage();
	if (opts.mode == TEST_MODE_UNSET)
		usage();
	if (opts.peer_cid == VMADDR_CID_ANY)
		usage();

	if (!control_host) {
		if (opts.mode != TEST_MODE_SERVER)
			usage();
		control_host = "0.0.0.0";
	}

	control_init(control_host, control_port,
		     opts.mode == TEST_MODE_SERVER);

	run_tests(test_cases, &opts);

	control_cleanup();
	return EXIT_SUCCESS;
}
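
/* Illustrative sketch only (not part of the suite): a new test is wired up by
 * implementing a client/server pair above and appending an entry to
 * test_cases[]; the test_stream_mynew_* names below are hypothetical:
 *
 *	{
 *		.name = "SOCK_STREAM my new behaviour",
 *		.run_client = test_stream_mynew_client,
 *		.run_server = test_stream_mynew_server,
 *	},
 *
 * The empty {} terminator must remain the last element, since main() passes
 * ARRAY_SIZE(test_cases) - 1 (the count excluding the terminator) to
 * skip_test() and pick_test().
 */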