1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * vsock_test - vsock.ko test suite 4 * 5 * Copyright (C) 2017 Red Hat, Inc. 6 * 7 * Author: Stefan Hajnoczi <stefanha@redhat.com> 8 */ 9 10 #include <getopt.h> 11 #include <stdio.h> 12 #include <stdlib.h> 13 #include <string.h> 14 #include <errno.h> 15 #include <unistd.h> 16 #include <linux/kernel.h> 17 #include <sys/types.h> 18 #include <sys/socket.h> 19 #include <time.h> 20 #include <sys/mman.h> 21 #include <poll.h> 22 #include <signal.h> 23 #include <sys/ioctl.h> 24 #include <linux/time64.h> 25 #include <pthread.h> 26 #include <fcntl.h> 27 #include <linux/sockios.h> 28 29 #include "vsock_test_zerocopy.h" 30 #include "timeout.h" 31 #include "control.h" 32 #include "util.h" 33 34 /* Basic messages for control_writeulong(), control_readulong() */ 35 #define CONTROL_CONTINUE 1 36 #define CONTROL_DONE 0 37 38 static void test_stream_connection_reset(const struct test_opts *opts) 39 { 40 union { 41 struct sockaddr sa; 42 struct sockaddr_vm svm; 43 } addr = { 44 .svm = { 45 .svm_family = AF_VSOCK, 46 .svm_port = opts->peer_port, 47 .svm_cid = opts->peer_cid, 48 }, 49 }; 50 int ret; 51 int fd; 52 53 fd = socket(AF_VSOCK, SOCK_STREAM, 0); 54 55 timeout_begin(TIMEOUT); 56 do { 57 ret = connect(fd, &addr.sa, sizeof(addr.svm)); 58 timeout_check("connect"); 59 } while (ret < 0 && errno == EINTR); 60 timeout_end(); 61 62 if (ret != -1) { 63 fprintf(stderr, "expected connect(2) failure, got %d\n", ret); 64 exit(EXIT_FAILURE); 65 } 66 if (errno != ECONNRESET) { 67 fprintf(stderr, "unexpected connect(2) errno %d\n", errno); 68 exit(EXIT_FAILURE); 69 } 70 71 close(fd); 72 } 73 74 static void test_stream_bind_only_client(const struct test_opts *opts) 75 { 76 union { 77 struct sockaddr sa; 78 struct sockaddr_vm svm; 79 } addr = { 80 .svm = { 81 .svm_family = AF_VSOCK, 82 .svm_port = opts->peer_port, 83 .svm_cid = opts->peer_cid, 84 }, 85 }; 86 int ret; 87 int fd; 88 89 /* Wait for the server to be ready */ 90 
control_expectln("BIND"); 91 92 fd = socket(AF_VSOCK, SOCK_STREAM, 0); 93 94 timeout_begin(TIMEOUT); 95 do { 96 ret = connect(fd, &addr.sa, sizeof(addr.svm)); 97 timeout_check("connect"); 98 } while (ret < 0 && errno == EINTR); 99 timeout_end(); 100 101 if (ret != -1) { 102 fprintf(stderr, "expected connect(2) failure, got %d\n", ret); 103 exit(EXIT_FAILURE); 104 } 105 if (errno != ECONNRESET) { 106 fprintf(stderr, "unexpected connect(2) errno %d\n", errno); 107 exit(EXIT_FAILURE); 108 } 109 110 /* Notify the server that the client has finished */ 111 control_writeln("DONE"); 112 113 close(fd); 114 } 115 116 static void test_stream_bind_only_server(const struct test_opts *opts) 117 { 118 int fd; 119 120 fd = vsock_bind(VMADDR_CID_ANY, opts->peer_port, SOCK_STREAM); 121 122 /* Notify the client that the server is ready */ 123 control_writeln("BIND"); 124 125 /* Wait for the client to finish */ 126 control_expectln("DONE"); 127 128 close(fd); 129 } 130 131 static void test_stream_client_close_client(const struct test_opts *opts) 132 { 133 int fd; 134 135 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 136 if (fd < 0) { 137 perror("connect"); 138 exit(EXIT_FAILURE); 139 } 140 141 send_byte(fd, 1, 0); 142 close(fd); 143 } 144 145 static void test_stream_client_close_server(const struct test_opts *opts) 146 { 147 int fd; 148 149 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 150 if (fd < 0) { 151 perror("accept"); 152 exit(EXIT_FAILURE); 153 } 154 155 /* Wait for the remote to close the connection, before check 156 * -EPIPE error on send. 
157 */ 158 vsock_wait_remote_close(fd); 159 160 send_byte(fd, -EPIPE, 0); 161 recv_byte(fd, 1, 0); 162 recv_byte(fd, 0, 0); 163 close(fd); 164 } 165 166 static void test_stream_server_close_client(const struct test_opts *opts) 167 { 168 int fd; 169 170 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 171 if (fd < 0) { 172 perror("connect"); 173 exit(EXIT_FAILURE); 174 } 175 176 /* Wait for the remote to close the connection, before check 177 * -EPIPE error on send. 178 */ 179 vsock_wait_remote_close(fd); 180 181 send_byte(fd, -EPIPE, 0); 182 recv_byte(fd, 1, 0); 183 recv_byte(fd, 0, 0); 184 close(fd); 185 } 186 187 static void test_stream_server_close_server(const struct test_opts *opts) 188 { 189 int fd; 190 191 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 192 if (fd < 0) { 193 perror("accept"); 194 exit(EXIT_FAILURE); 195 } 196 197 send_byte(fd, 1, 0); 198 close(fd); 199 } 200 201 /* With the standard socket sizes, VMCI is able to support about 100 202 * concurrent stream connections. 
 */
#define MULTICONN_NFDS 100

/* Open MULTICONN_NFDS concurrent stream connections, then exchange one
 * byte on each (odd indices receive, even indices send — the server does
 * the opposite).
 */
static void test_stream_multiconn_client(const struct test_opts *opts)
{
	int fds[MULTICONN_NFDS];
	int i;

	for (i = 0; i < MULTICONN_NFDS; i++) {
		fds[i] = vsock_stream_connect(opts->peer_cid, opts->peer_port);
		if (fds[i] < 0) {
			perror("connect");
			exit(EXIT_FAILURE);
		}
	}

	for (i = 0; i < MULTICONN_NFDS; i++) {
		if (i % 2)
			recv_byte(fds[i], 1, 0);
		else
			send_byte(fds[i], 1, 0);
	}

	for (i = 0; i < MULTICONN_NFDS; i++)
		close(fds[i]);
}

/* Server counterpart: accept MULTICONN_NFDS connections and perform the
 * mirrored byte exchange (odd indices send, even indices receive).
 */
static void test_stream_multiconn_server(const struct test_opts *opts)
{
	int fds[MULTICONN_NFDS];
	int i;

	for (i = 0; i < MULTICONN_NFDS; i++) {
		fds[i] = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
		if (fds[i] < 0) {
			perror("accept");
			exit(EXIT_FAILURE);
		}
	}

	for (i = 0; i < MULTICONN_NFDS; i++) {
		if (i % 2)
			send_byte(fds[i], 1, 0);
		else
			recv_byte(fds[i], 1, 0);
	}

	for (i = 0; i < MULTICONN_NFDS; i++)
		close(fds[i]);
}

#define MSG_PEEK_BUF_LEN 64

/* Shared MSG_PEEK client: send one random MSG_PEEK_BUF_LEN-byte message
 * once the server reports it is ready. 'seqpacket' selects the socket type.
 */
static void test_msg_peek_client(const struct test_opts *opts,
				 bool seqpacket)
{
	unsigned char buf[MSG_PEEK_BUF_LEN];
	int fd;
	int i;

	if (seqpacket)
		fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
	else
		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);

	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = rand() & 0xFF;

	control_expectln("SRVREADY");

	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));

	close(fd);
}

/* Shared MSG_PEEK server: peek an empty queue, then partial/full peeks,
 * and verify peeked data matches what a normal read returns (continues
 * below).
 */
static void test_msg_peek_server(const struct test_opts *opts,
				 bool seqpacket)
{
	unsigned char buf_half[MSG_PEEK_BUF_LEN / 2];
	unsigned char buf_normal[MSG_PEEK_BUF_LEN];
	unsigned char buf_peek[MSG_PEEK_BUF_LEN];
	int fd;

	if (seqpacket)
fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 293 else 294 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 295 296 if (fd < 0) { 297 perror("accept"); 298 exit(EXIT_FAILURE); 299 } 300 301 /* Peek from empty socket. */ 302 recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK | MSG_DONTWAIT, 303 -EAGAIN); 304 305 control_writeln("SRVREADY"); 306 307 /* Peek part of data. */ 308 recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK, sizeof(buf_half)); 309 310 /* Peek whole data. */ 311 recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK, sizeof(buf_peek)); 312 313 /* Compare partial and full peek. */ 314 if (memcmp(buf_half, buf_peek, sizeof(buf_half))) { 315 fprintf(stderr, "Partial peek data mismatch\n"); 316 exit(EXIT_FAILURE); 317 } 318 319 if (seqpacket) { 320 /* This type of socket supports MSG_TRUNC flag, 321 * so check it with MSG_PEEK. We must get length 322 * of the message. 323 */ 324 recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK | MSG_TRUNC, 325 sizeof(buf_peek)); 326 } 327 328 recv_buf(fd, buf_normal, sizeof(buf_normal), 0, sizeof(buf_normal)); 329 330 /* Compare full peek and normal read. 
*/ 331 if (memcmp(buf_peek, buf_normal, sizeof(buf_peek))) { 332 fprintf(stderr, "Full peek data mismatch\n"); 333 exit(EXIT_FAILURE); 334 } 335 336 close(fd); 337 } 338 339 static void test_stream_msg_peek_client(const struct test_opts *opts) 340 { 341 return test_msg_peek_client(opts, false); 342 } 343 344 static void test_stream_msg_peek_server(const struct test_opts *opts) 345 { 346 return test_msg_peek_server(opts, false); 347 } 348 349 #define SOCK_BUF_SIZE (2 * 1024 * 1024) 350 #define MAX_MSG_PAGES 4 351 352 static void test_seqpacket_msg_bounds_client(const struct test_opts *opts) 353 { 354 unsigned long long sock_buf_size; 355 unsigned long curr_hash; 356 size_t max_msg_size; 357 int page_size; 358 int msg_count; 359 int fd; 360 361 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 362 if (fd < 0) { 363 perror("connect"); 364 exit(EXIT_FAILURE); 365 } 366 367 sock_buf_size = SOCK_BUF_SIZE; 368 369 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE, 370 sock_buf_size, 371 "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)"); 372 373 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, 374 sock_buf_size, 375 "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); 376 377 /* Wait, until receiver sets buffer size. */ 378 control_expectln("SRVREADY"); 379 380 curr_hash = 0; 381 page_size = getpagesize(); 382 max_msg_size = MAX_MSG_PAGES * page_size; 383 msg_count = SOCK_BUF_SIZE / max_msg_size; 384 385 for (int i = 0; i < msg_count; i++) { 386 size_t buf_size; 387 int flags; 388 void *buf; 389 390 /* Use "small" buffers and "big" buffers. */ 391 if (i & 1) 392 buf_size = page_size + 393 (rand() % (max_msg_size - page_size)); 394 else 395 buf_size = 1 + (rand() % page_size); 396 397 buf = malloc(buf_size); 398 399 if (!buf) { 400 perror("malloc"); 401 exit(EXIT_FAILURE); 402 } 403 404 memset(buf, rand() & 0xff, buf_size); 405 /* Set at least one MSG_EOR + some random. 
		 */
		if (i == (msg_count / 2) || (rand() & 1)) {
			flags = MSG_EOR;
			curr_hash++;
		} else {
			flags = 0;
		}

		send_buf(fd, buf, buf_size, flags, buf_size);

		/*
		 * Hash sum is computed at both client and server in
		 * the same way:
		 * H += hash('message data')
		 * Such hash "controls" both data integrity and message
		 * bounds. After data exchange, both sums are compared
		 * using control socket, and if message bounds wasn't
		 * broken - two values must be equal.
		 */
		curr_hash += hash_djb2(buf, buf_size);
		free(buf);
	}

	control_writeln("SENDDONE");
	control_writeulong(curr_hash);
	close(fd);
}

/* Receive the message stream sent above, counting MSG_EOR markers and
 * hashing payloads the same way as the client; a mismatch against the
 * client's hash means message boundaries were broken (continues below).
 */
static void test_seqpacket_msg_bounds_server(const struct test_opts *opts)
{
	unsigned long long sock_buf_size;
	unsigned long remote_hash;
	unsigned long curr_hash;
	int fd;
	struct msghdr msg = {0};
	struct iovec iov = {0};

	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	sock_buf_size = SOCK_BUF_SIZE;

	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");

	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");

	/* Ready to receive data. */
	control_writeln("SRVREADY");
	/* Wait, until peer sends whole data.
	 */
	control_expectln("SENDDONE");
	iov.iov_len = MAX_MSG_PAGES * getpagesize();
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	curr_hash = 0;

	/* Drain messages until EOF (recvmsg() == 0 after peer close). */
	while (1) {
		ssize_t recv_size;

		recv_size = recvmsg(fd, &msg, 0);

		if (!recv_size)
			break;

		if (recv_size < 0) {
			perror("recvmsg");
			exit(EXIT_FAILURE);
		}

		/* Count record boundaries the same way the sender did. */
		if (msg.msg_flags & MSG_EOR)
			curr_hash++;

		curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size);
	}

	free(iov.iov_base);
	close(fd);
	remote_hash = control_readulong();

	if (curr_hash != remote_hash) {
		fprintf(stderr, "Message bounds broken\n");
		exit(EXIT_FAILURE);
	}
}

#define MESSAGE_TRUNC_SZ 32
/* Send one MESSAGE_TRUNC_SZ-byte message so the server can exercise
 * MSG_TRUNC with an undersized receive buffer. Buffer contents are not
 * inspected by the server, only the length.
 */
static void test_seqpacket_msg_trunc_client(const struct test_opts *opts)
{
	int fd;
	char buf[MESSAGE_TRUNC_SZ];

	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));

	control_writeln("SENDDONE");
	close(fd);
}

/* Receive with a half-sized buffer and MSG_TRUNC: recvmsg() must report
 * the real message length and set MSG_TRUNC in msg_flags (continues below).
 */
static void test_seqpacket_msg_trunc_server(const struct test_opts *opts)
{
	int fd;
	char buf[MESSAGE_TRUNC_SZ / 2];
	struct msghdr msg = {0};
	struct iovec iov = {0};

	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	control_expectln("SENDDONE");
	iov.iov_base = buf;
	iov.iov_len = sizeof(buf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ssize_t ret = recvmsg(fd, &msg, MSG_TRUNC);

	if (ret != MESSAGE_TRUNC_SZ) {
		printf("%zi\n", ret);
		perror("MSG_TRUNC doesn't work");
		exit(EXIT_FAILURE);
	}

	if (!(msg.msg_flags & MSG_TRUNC)) {
		fprintf(stderr, "MSG_TRUNC expected\n");
		exit(EXIT_FAILURE);
	}

	close(fd);
}

/* Return CLOCK_REALTIME as nanoseconds since the epoch.
 * NOTE(review): assumes 64-bit time_t — tv_sec * NSEC_PER_SEC would
 * overflow a 32-bit time_t; confirm for the targeted platforms.
 */
static time_t current_nsec(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_REALTIME, &ts)) {
		perror("clock_gettime(3) failed");
		exit(EXIT_FAILURE);
	}

	return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
}

#define RCVTIMEO_TIMEOUT_SEC 1
#define READ_OVERHEAD_NSEC 250000000 /* 0.25 sec */

/* Set a 1s SO_RCVTIMEO and read from a connection that will never deliver
 * data: read(2) must fail with EAGAIN, and must not take noticeably longer
 * than the configured timeout (READ_OVERHEAD_NSEC of slack).
 */
static void test_seqpacket_timeout_client(const struct test_opts *opts)
{
	int fd;
	struct timeval tv;
	char dummy;
	time_t read_enter_ns;
	time_t read_overhead_ns;

	fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	tv.tv_sec = RCVTIMEO_TIMEOUT_SEC;
	tv.tv_usec = 0;

	setsockopt_timeval_check(fd, SOL_SOCKET, SO_RCVTIMEO, tv,
				 "setsockopt(SO_RCVTIMEO)");

	read_enter_ns = current_nsec();

	if (read(fd, &dummy, sizeof(dummy)) != -1) {
		fprintf(stderr,
			"expected 'dummy' read(2) failure\n");
		exit(EXIT_FAILURE);
	}

	if (errno != EAGAIN) {
		perror("EAGAIN expected");
		exit(EXIT_FAILURE);
	}

	/* Time spent in read(2) beyond the configured timeout. */
	read_overhead_ns = current_nsec() - read_enter_ns -
			   NSEC_PER_SEC * RCVTIMEO_TIMEOUT_SEC;

	if (read_overhead_ns > READ_OVERHEAD_NSEC) {
		fprintf(stderr,
			"too much time in read(2), %lu > %i ns\n",
			read_overhead_ns, READ_OVERHEAD_NSEC);
		exit(EXIT_FAILURE);
	}

	control_writeln("WAITDONE");
	close(fd);
}

/* Peer of the timeout test: accept and simply keep the connection open,
 * sending nothing, until the client finishes waiting.
 */
static void test_seqpacket_timeout_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	control_expectln("WAITDONE");
	close(fd);
}

/* Send a message one byte larger than the socket buffer size and expect
 * -EMSGSIZE (continues below).
 */
static void test_seqpacket_bigmsg_client(const struct test_opts *opts)
{
	unsigned long long sock_buf_size;
	size_t buf_size;
	socklen_t len;
	void
*data; 638 int fd; 639 640 len = sizeof(sock_buf_size); 641 642 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 643 if (fd < 0) { 644 perror("connect"); 645 exit(EXIT_FAILURE); 646 } 647 648 if (getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, 649 &sock_buf_size, &len)) { 650 perror("getsockopt"); 651 exit(EXIT_FAILURE); 652 } 653 654 sock_buf_size++; 655 656 /* size_t can be < unsigned long long */ 657 buf_size = (size_t)sock_buf_size; 658 if (buf_size != sock_buf_size) { 659 fprintf(stderr, "Returned BUFFER_SIZE too large\n"); 660 exit(EXIT_FAILURE); 661 } 662 663 data = malloc(buf_size); 664 if (!data) { 665 perror("malloc"); 666 exit(EXIT_FAILURE); 667 } 668 669 send_buf(fd, data, buf_size, 0, -EMSGSIZE); 670 671 control_writeln("CLISENT"); 672 673 free(data); 674 close(fd); 675 } 676 677 static void test_seqpacket_bigmsg_server(const struct test_opts *opts) 678 { 679 int fd; 680 681 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 682 if (fd < 0) { 683 perror("accept"); 684 exit(EXIT_FAILURE); 685 } 686 687 control_expectln("CLISENT"); 688 689 close(fd); 690 } 691 692 #define BUF_PATTERN_1 'a' 693 #define BUF_PATTERN_2 'b' 694 695 static void test_seqpacket_invalid_rec_buffer_client(const struct test_opts *opts) 696 { 697 int fd; 698 unsigned char *buf1; 699 unsigned char *buf2; 700 int buf_size = getpagesize() * 3; 701 702 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 703 if (fd < 0) { 704 perror("connect"); 705 exit(EXIT_FAILURE); 706 } 707 708 buf1 = malloc(buf_size); 709 if (!buf1) { 710 perror("'malloc()' for 'buf1'"); 711 exit(EXIT_FAILURE); 712 } 713 714 buf2 = malloc(buf_size); 715 if (!buf2) { 716 perror("'malloc()' for 'buf2'"); 717 exit(EXIT_FAILURE); 718 } 719 720 memset(buf1, BUF_PATTERN_1, buf_size); 721 memset(buf2, BUF_PATTERN_2, buf_size); 722 723 send_buf(fd, buf1, buf_size, 0, buf_size); 724 725 send_buf(fd, buf2, buf_size, 0, buf_size); 726 727 close(fd); 728 } 729 730 static void 
test_seqpacket_invalid_rec_buffer_server(const struct test_opts *opts)
{
	int fd;
	unsigned char *broken_buf;
	unsigned char *valid_buf;
	int page_size = getpagesize();
	int buf_size = page_size * 3;
	ssize_t res;
	int prot = PROT_READ | PROT_WRITE;
	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
	int i;

	fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	/* Setup first buffer: 3 pages with the middle page unmapped, so a
	 * read into it must fault.
	 */
	broken_buf = mmap(NULL, buf_size, prot, flags, -1, 0);
	if (broken_buf == MAP_FAILED) {
		perror("mmap for 'broken_buf'");
		exit(EXIT_FAILURE);
	}

	/* Unmap "hole" in buffer. */
	if (munmap(broken_buf + page_size, page_size)) {
		perror("'broken_buf' setup");
		exit(EXIT_FAILURE);
	}

	valid_buf = mmap(NULL, buf_size, prot, flags, -1, 0);
	if (valid_buf == MAP_FAILED) {
		perror("mmap for 'valid_buf'");
		exit(EXIT_FAILURE);
	}

	/* Try to fill buffer with unmapped middle: must fail with EFAULT. */
	res = read(fd, broken_buf, buf_size);
	if (res != -1) {
		fprintf(stderr,
			"expected 'broken_buf' read(2) failure, got %zi\n",
			res);
		exit(EXIT_FAILURE);
	}

	if (errno != EFAULT) {
		perror("unexpected errno of 'broken_buf'");
		exit(EXIT_FAILURE);
	}

	/* Try to fill valid buffer: the second message must still arrive
	 * intact after the failed receive.
	 */
	res = read(fd, valid_buf, buf_size);
	if (res < 0) {
		perror("unexpected 'valid_buf' read(2) failure");
		exit(EXIT_FAILURE);
	}

	if (res != buf_size) {
		fprintf(stderr,
			"invalid 'valid_buf' read(2), expected %i, got %zi\n",
			buf_size, res);
		exit(EXIT_FAILURE);
	}

	for (i = 0; i < buf_size; i++) {
		if (valid_buf[i] != BUF_PATTERN_2) {
			fprintf(stderr,
				"invalid pattern for 'valid_buf' at %i, expected %hhX, got %hhX\n",
				i, BUF_PATTERN_2, valid_buf[i]);
			exit(EXIT_FAILURE);
		}
	}

	/* Unmap buffers (broken_buf's middle page is already gone). */
	munmap(broken_buf, page_size);
	munmap(broken_buf + page_size * 2, page_size);
	munmap(valid_buf, buf_size);
	close(fd);
}

#define RCVLOWAT_BUF_SIZE 128

/* SO_RCVLOWAT poll test, server side: send 1 byte (below the client's
 * low-water mark), wait for the client to verify poll() stays silent, then
 * send the remaining RCVLOWAT_BUF_SIZE - 1 bytes.
 */
static void test_stream_poll_rcvlowat_server(const struct test_opts *opts)
{
	int fd;
	int i;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	/* Send 1 byte. */
	send_byte(fd, 1, 0);

	control_writeln("SRVSENT");

	/* Wait until client is ready to receive rest of data. */
	control_expectln("CLNSENT");

	for (i = 0; i < RCVLOWAT_BUF_SIZE - 1; i++)
		send_byte(fd, 1, 0);

	/* Keep socket in active state. */
	control_expectln("POLLDONE");

	close(fd);
}

/* Client side: with SO_RCVLOWAT = RCVLOWAT_BUF_SIZE, poll() must report
 * nothing while only 1 byte is queued, and must report readiness once the
 * full amount has arrived (continues below).
 */
static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
{
	int lowat_val = RCVLOWAT_BUF_SIZE;
	char buf[RCVLOWAT_BUF_SIZE];
	struct pollfd fds;
	short poll_flags;
	int fd;

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
			     lowat_val, "setsockopt(SO_RCVLOWAT)");

	control_expectln("SRVSENT");

	/* At this point, server sent 1 byte. */
	fds.fd = fd;
	poll_flags = POLLIN | POLLRDNORM;
	fds.events = poll_flags;

	/* Try to wait for 1 sec. */
	if (poll(&fds, 1, 1000) < 0) {
		perror("poll");
		exit(EXIT_FAILURE);
	}

	/* poll() must return nothing. */
	if (fds.revents) {
		fprintf(stderr, "Unexpected poll result %hx\n",
			fds.revents);
		exit(EXIT_FAILURE);
	}

	/* Tell server to send rest of data. */
	control_writeln("CLNSENT");

	/* Poll for data. */
	if (poll(&fds, 1, 10000) < 0) {
		perror("poll");
		exit(EXIT_FAILURE);
	}

	/* Only these two bits are expected.
	 */
	if (fds.revents != poll_flags) {
		fprintf(stderr, "Unexpected poll result %hx\n",
			fds.revents);
		exit(EXIT_FAILURE);
	}

	/* Use MSG_DONTWAIT, if call is going to wait, EAGAIN
	 * will be returned.
	 */
	recv_buf(fd, buf, sizeof(buf), MSG_DONTWAIT, RCVLOWAT_BUF_SIZE);

	control_writeln("POLLDONE");

	close(fd);
}

#define INV_BUF_TEST_DATA_LEN 512

/* Receive into a NULL buffer (expect -EFAULT), then verify recovery
 * semantics: SOCK_STREAM keeps the data readable, SOCK_SEQPACKET drops
 * the message (continues below).
 */
static void test_inv_buf_client(const struct test_opts *opts, bool stream)
{
	unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
	ssize_t expected_ret;
	int fd;

	if (stream)
		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	else
		fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);

	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	control_expectln("SENDDONE");

	/* Use invalid buffer here. */
	recv_buf(fd, NULL, sizeof(data), 0, -EFAULT);

	if (stream) {
		/* For SOCK_STREAM we must continue reading. */
		expected_ret = sizeof(data);
	} else {
		/* For SOCK_SEQPACKET socket's queue must be empty.
		 */
		expected_ret = -EAGAIN;
	}

	recv_buf(fd, data, sizeof(data), MSG_DONTWAIT, expected_ret);

	control_writeln("DONE");

	close(fd);
}

/* Peer of the invalid-buffer test: send one message and hold the
 * connection open while the client performs its two receives.
 */
static void test_inv_buf_server(const struct test_opts *opts, bool stream)
{
	unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
	int fd;

	if (stream)
		fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	else
		fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL);

	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	send_buf(fd, data, sizeof(data), 0, sizeof(data));

	control_writeln("SENDDONE");

	control_expectln("DONE");

	close(fd);
}

/* Socket-type-specific wrappers for the invalid-buffer test. */
static void test_stream_inv_buf_client(const struct test_opts *opts)
{
	test_inv_buf_client(opts, true);
}

static void test_stream_inv_buf_server(const struct test_opts *opts)
{
	test_inv_buf_server(opts, true);
}

static void test_seqpacket_inv_buf_client(const struct test_opts *opts)
{
	test_inv_buf_client(opts, false);
}

static void test_seqpacket_inv_buf_server(const struct test_opts *opts)
{
	test_inv_buf_server(opts, false);
}

#define HELLO_STR "HELLO"
#define WORLD_STR "WORLD"

/* virtio skbuff-merge test, client side: send two buffers with a pause in
 * between so the second is appended to the partially-read first (continues
 * below).
 */
static void test_stream_virtio_skb_merge_client(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	/* Send first skbuff. */
	send_buf(fd, HELLO_STR, strlen(HELLO_STR), 0, strlen(HELLO_STR));

	control_writeln("SEND0");
	/* Peer reads part of first skbuff. */
	control_expectln("REPLY0");

	/* Send second skbuff, it will be appended to the first. */
	send_buf(fd, WORLD_STR, strlen(WORLD_STR), 0, strlen(WORLD_STR));

	control_writeln("SEND1");
	/* Peer reads merged skbuff packet.
*/ 1011 control_expectln("REPLY1"); 1012 1013 close(fd); 1014 } 1015 1016 static void test_stream_virtio_skb_merge_server(const struct test_opts *opts) 1017 { 1018 size_t read = 0, to_read; 1019 unsigned char buf[64]; 1020 int fd; 1021 1022 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1023 if (fd < 0) { 1024 perror("accept"); 1025 exit(EXIT_FAILURE); 1026 } 1027 1028 control_expectln("SEND0"); 1029 1030 /* Read skbuff partially. */ 1031 to_read = 2; 1032 recv_buf(fd, buf + read, to_read, 0, to_read); 1033 read += to_read; 1034 1035 control_writeln("REPLY0"); 1036 control_expectln("SEND1"); 1037 1038 /* Read the rest of both buffers */ 1039 to_read = strlen(HELLO_STR WORLD_STR) - read; 1040 recv_buf(fd, buf + read, to_read, 0, to_read); 1041 read += to_read; 1042 1043 /* No more bytes should be there */ 1044 to_read = sizeof(buf) - read; 1045 recv_buf(fd, buf + read, to_read, MSG_DONTWAIT, -EAGAIN); 1046 1047 if (memcmp(buf, HELLO_STR WORLD_STR, strlen(HELLO_STR WORLD_STR))) { 1048 fprintf(stderr, "pattern mismatch\n"); 1049 exit(EXIT_FAILURE); 1050 } 1051 1052 control_writeln("REPLY1"); 1053 1054 close(fd); 1055 } 1056 1057 static void test_seqpacket_msg_peek_client(const struct test_opts *opts) 1058 { 1059 return test_msg_peek_client(opts, true); 1060 } 1061 1062 static void test_seqpacket_msg_peek_server(const struct test_opts *opts) 1063 { 1064 return test_msg_peek_server(opts, true); 1065 } 1066 1067 static sig_atomic_t have_sigpipe; 1068 1069 static void sigpipe(int signo) 1070 { 1071 have_sigpipe = 1; 1072 } 1073 1074 #define SEND_SLEEP_USEC (10 * 1000) 1075 1076 static void test_stream_check_sigpipe(int fd) 1077 { 1078 ssize_t res; 1079 1080 have_sigpipe = 0; 1081 1082 /* When the other peer calls shutdown(SHUT_RD), there is a chance that 1083 * the send() call could occur before the message carrying the close 1084 * information arrives over the transport. In such cases, the send() 1085 * might still succeed. 
	 * To avoid this race, let's retry the send() call
	 * a few times, ensuring the test is more reliable.
	 */
	timeout_begin(TIMEOUT);
	while (1) {
		res = send(fd, "A", 1, 0);
		if (res == -1 && errno != EINTR)
			break;

		/* Sleep a little before trying again to avoid flooding the
		 * other peer and filling its receive buffer, causing
		 * false-negative.
		 */
		timeout_usleep(SEND_SLEEP_USEC);
		timeout_check("send");
	}
	timeout_end();

	if (errno != EPIPE) {
		fprintf(stderr, "unexpected send(2) errno %d\n", errno);
		exit(EXIT_FAILURE);
	}
	if (!have_sigpipe) {
		fprintf(stderr, "SIGPIPE expected\n");
		exit(EXIT_FAILURE);
	}

	have_sigpipe = 0;

	/* Repeat with MSG_NOSIGNAL: still EPIPE, but no SIGPIPE. */
	timeout_begin(TIMEOUT);
	while (1) {
		res = send(fd, "A", 1, MSG_NOSIGNAL);
		if (res == -1 && errno != EINTR)
			break;

		timeout_usleep(SEND_SLEEP_USEC);
		timeout_check("send");
	}
	timeout_end();

	if (errno != EPIPE) {
		fprintf(stderr, "unexpected send(2) errno %d\n", errno);
		exit(EXIT_FAILURE);
	}
	if (have_sigpipe) {
		fprintf(stderr, "SIGPIPE not expected\n");
		exit(EXIT_FAILURE);
	}
}

/* Shut down our own write side, then run the SIGPIPE/EPIPE checks. */
static void test_stream_shutwr_client(const struct test_opts *opts)
{
	int fd;

	struct sigaction act = {
		.sa_handler = sigpipe,
	};

	sigaction(SIGPIPE, &act, NULL);

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	if (shutdown(fd, SHUT_WR)) {
		perror("shutdown");
		exit(EXIT_FAILURE);
	}

	test_stream_check_sigpipe(fd);

	control_writeln("CLIENTDONE");

	close(fd);
}

/* Peer of shutwr: accept and wait for the client to finish (continues
 * below).
 */
static void test_stream_shutwr_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	control_expectln("CLIENTDONE");

	close(fd);
}

/* Peer shuts down its read side; our sends must then hit SIGPIPE/EPIPE. */
static void test_stream_shutrd_client(const struct test_opts *opts)
{
	int fd;

	struct sigaction act = {
		.sa_handler = sigpipe,
	};

	sigaction(SIGPIPE, &act, NULL);

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	control_expectln("SHUTRDDONE");

	test_stream_check_sigpipe(fd);

	control_writeln("CLIENTDONE");

	close(fd);
}

/* Server of shutrd: shutdown(SHUT_RD) and tell the client to start its
 * send checks.
 */
static void test_stream_shutrd_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	if (shutdown(fd, SHUT_RD)) {
		perror("shutdown");
		exit(EXIT_FAILURE);
	}

	control_writeln("SHUTRDDONE");
	control_expectln("CLIENTDONE");

	close(fd);
}

/* Accept two successive connections from a client that binds to the same
 * local port each time, verifying the port is reusable after close.
 */
static void test_double_bind_connect_server(const struct test_opts *opts)
{
	int listen_fd, client_fd, i;
	struct sockaddr_vm sa_client;
	socklen_t socklen_client = sizeof(sa_client);

	listen_fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);

	for (i = 0; i < 2; i++) {
		control_writeln("LISTENING");

		timeout_begin(TIMEOUT);
		do {
			client_fd = accept(listen_fd, (struct sockaddr *)&sa_client,
					   &socklen_client);
			timeout_check("accept");
		} while (client_fd < 0 && errno == EINTR);
		timeout_end();

		if (client_fd < 0) {
			perror("accept");
			exit(EXIT_FAILURE);
		}

		/* Waiting for remote peer to close connection */
		vsock_wait_remote_close(client_fd);
	}

	close(listen_fd);
}

/* Bind to a fixed local port and connect, twice in a row (continues
 * below).
 */
static void test_double_bind_connect_client(const struct test_opts *opts)
{
	int i,
	client_fd;

	for (i = 0; i < 2; i++) {
		/* Wait until server is ready to accept a new connection */
		control_expectln("LISTENING");

		/* We use 'peer_port + 1' as "some" port for the 'bind()'
		 * call. It is safe for overflow, but must be considered,
		 * when running multiple test applications simultaneously
		 * where 'peer-port' argument differs by 1.
		 */
		client_fd = vsock_bind_connect(opts->peer_cid, opts->peer_port,
					       opts->peer_port + 1, SOCK_STREAM);

		close(client_fd);
	}
}

#define MSG_BUF_IOCTL_LEN 64
/* SIOCOUTQ test, server side: consume the client's message and confirm
 * receipt over the control channel.
 */
static void test_unsent_bytes_server(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int client_fd;

	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
	if (client_fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	recv_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
	control_writeln("RECEIVED");

	close(client_fd);
}

/* SIOCOUTQ test, client side: send a message and, once the peer confirms
 * receipt, wait for the unsent-bytes counter to drain to zero (continues
 * below).
 */
static void test_unsent_bytes_client(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int fd;

	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	for (int i = 0; i < sizeof(buf); i++)
		buf[i] = rand() & 0xFF;

	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
	control_expectln("RECEIVED");

	/* SIOCOUTQ isn't guaranteed to instantly track sent data. Even though
	 * the "RECEIVED" message means that the other side has received the
	 * data, there can be a delay in our kernel before updating the "unsent
	 * bytes" counter. vsock_wait_sent() will repeat SIOCOUTQ until it
	 * returns 0.
	 */
	if (!vsock_wait_sent(fd))
		fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");

	close(fd);
}

/* SIOCINQ test, server side: send one message and announce it, leaving it
 * unread in the client's receive queue.
 */
static void test_unread_bytes_server(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int client_fd;

	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
	if (client_fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	for (int i = 0; i < sizeof(buf); i++)
		buf[i] = rand() & 0xFF;

	send_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
	control_writeln("SENT");

	close(client_fd);
}

/* SIOCINQ test, client side: verify the unread-bytes counter equals the
 * queued message length before reading, and zero after (continues below).
 */
static void test_unread_bytes_client(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int fd;

	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	control_expectln("SENT");
	/* The data has arrived but has not been read. The expected is
	 * MSG_BUF_IOCTL_LEN.
	 */
	if (!vsock_ioctl_int(fd, SIOCINQ, MSG_BUF_IOCTL_LEN)) {
		fprintf(stderr, "Test skipped, SIOCINQ not supported.\n");
		goto out;
	}

	recv_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
	/* All data has been consumed, so the expected is 0.
*/ 1364 vsock_ioctl_int(fd, SIOCINQ, 0); 1365 1366 out: 1367 close(fd); 1368 } 1369 1370 static void test_stream_unsent_bytes_client(const struct test_opts *opts) 1371 { 1372 test_unsent_bytes_client(opts, SOCK_STREAM); 1373 } 1374 1375 static void test_stream_unsent_bytes_server(const struct test_opts *opts) 1376 { 1377 test_unsent_bytes_server(opts, SOCK_STREAM); 1378 } 1379 1380 static void test_seqpacket_unsent_bytes_client(const struct test_opts *opts) 1381 { 1382 test_unsent_bytes_client(opts, SOCK_SEQPACKET); 1383 } 1384 1385 static void test_seqpacket_unsent_bytes_server(const struct test_opts *opts) 1386 { 1387 test_unsent_bytes_server(opts, SOCK_SEQPACKET); 1388 } 1389 1390 static void test_stream_unread_bytes_client(const struct test_opts *opts) 1391 { 1392 test_unread_bytes_client(opts, SOCK_STREAM); 1393 } 1394 1395 static void test_stream_unread_bytes_server(const struct test_opts *opts) 1396 { 1397 test_unread_bytes_server(opts, SOCK_STREAM); 1398 } 1399 1400 static void test_seqpacket_unread_bytes_client(const struct test_opts *opts) 1401 { 1402 test_unread_bytes_client(opts, SOCK_SEQPACKET); 1403 } 1404 1405 static void test_seqpacket_unread_bytes_server(const struct test_opts *opts) 1406 { 1407 test_unread_bytes_server(opts, SOCK_SEQPACKET); 1408 } 1409 1410 #define RCVLOWAT_CREDIT_UPD_BUF_SIZE (1024 * 128) 1411 /* This define is the same as in 'include/linux/virtio_vsock.h': 1412 * it is used to decide when to send credit update message during 1413 * reading from rx queue of a socket. Value and its usage in 1414 * kernel is important for this test. 
1415 */ 1416 #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64) 1417 1418 static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opts) 1419 { 1420 size_t buf_size; 1421 void *buf; 1422 int fd; 1423 1424 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1425 if (fd < 0) { 1426 perror("connect"); 1427 exit(EXIT_FAILURE); 1428 } 1429 1430 /* Send 1 byte more than peer's buffer size. */ 1431 buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE + 1; 1432 1433 buf = malloc(buf_size); 1434 if (!buf) { 1435 perror("malloc"); 1436 exit(EXIT_FAILURE); 1437 } 1438 1439 /* Wait until peer sets needed buffer size. */ 1440 recv_byte(fd, 1, 0); 1441 1442 if (send(fd, buf, buf_size, 0) != buf_size) { 1443 perror("send failed"); 1444 exit(EXIT_FAILURE); 1445 } 1446 1447 free(buf); 1448 close(fd); 1449 } 1450 1451 static void test_stream_credit_update_test(const struct test_opts *opts, 1452 bool low_rx_bytes_test) 1453 { 1454 int recv_buf_size; 1455 struct pollfd fds; 1456 size_t buf_size; 1457 unsigned long long sock_buf_size; 1458 void *buf; 1459 int fd; 1460 1461 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1462 if (fd < 0) { 1463 perror("accept"); 1464 exit(EXIT_FAILURE); 1465 } 1466 1467 buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE; 1468 1469 /* size_t can be < unsigned long long */ 1470 sock_buf_size = buf_size; 1471 1472 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, 1473 sock_buf_size, 1474 "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); 1475 1476 if (low_rx_bytes_test) { 1477 /* Set new SO_RCVLOWAT here. This enables sending credit 1478 * update when number of bytes if our rx queue become < 1479 * SO_RCVLOWAT value. 
1480 */ 1481 recv_buf_size = 1 + VIRTIO_VSOCK_MAX_PKT_BUF_SIZE; 1482 1483 setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT, 1484 recv_buf_size, "setsockopt(SO_RCVLOWAT)"); 1485 } 1486 1487 /* Send one dummy byte here, because 'setsockopt()' above also 1488 * sends special packet which tells sender to update our buffer 1489 * size. This 'send_byte()' will serialize such packet with data 1490 * reads in a loop below. Sender starts transmission only when 1491 * it receives this single byte. 1492 */ 1493 send_byte(fd, 1, 0); 1494 1495 buf = malloc(buf_size); 1496 if (!buf) { 1497 perror("malloc"); 1498 exit(EXIT_FAILURE); 1499 } 1500 1501 /* Wait until there will be 128KB of data in rx queue. */ 1502 while (1) { 1503 ssize_t res; 1504 1505 res = recv(fd, buf, buf_size, MSG_PEEK); 1506 if (res == buf_size) 1507 break; 1508 1509 if (res <= 0) { 1510 fprintf(stderr, "unexpected 'recv()' return: %zi\n", res); 1511 exit(EXIT_FAILURE); 1512 } 1513 } 1514 1515 /* There is 128KB of data in the socket's rx queue, dequeue first 1516 * 64KB, credit update is sent if 'low_rx_bytes_test' == true. 1517 * Otherwise, credit update is sent in 'if (!low_rx_bytes_test)'. 1518 */ 1519 recv_buf_size = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE; 1520 recv_buf(fd, buf, recv_buf_size, 0, recv_buf_size); 1521 1522 if (!low_rx_bytes_test) { 1523 recv_buf_size++; 1524 1525 /* Updating SO_RCVLOWAT will send credit update. */ 1526 setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT, 1527 recv_buf_size, "setsockopt(SO_RCVLOWAT)"); 1528 } 1529 1530 fds.fd = fd; 1531 fds.events = POLLIN | POLLRDNORM | POLLERR | 1532 POLLRDHUP | POLLHUP; 1533 1534 /* This 'poll()' will return once we receive last byte 1535 * sent by client. 
1536 */ 1537 if (poll(&fds, 1, -1) < 0) { 1538 perror("poll"); 1539 exit(EXIT_FAILURE); 1540 } 1541 1542 if (fds.revents & POLLERR) { 1543 fprintf(stderr, "'poll()' error\n"); 1544 exit(EXIT_FAILURE); 1545 } 1546 1547 if (fds.revents & (POLLIN | POLLRDNORM)) { 1548 recv_buf(fd, buf, recv_buf_size, MSG_DONTWAIT, recv_buf_size); 1549 } else { 1550 /* These flags must be set, as there is at 1551 * least 64KB of data ready to read. 1552 */ 1553 fprintf(stderr, "POLLIN | POLLRDNORM expected\n"); 1554 exit(EXIT_FAILURE); 1555 } 1556 1557 free(buf); 1558 close(fd); 1559 } 1560 1561 static void test_stream_cred_upd_on_low_rx_bytes(const struct test_opts *opts) 1562 { 1563 test_stream_credit_update_test(opts, true); 1564 } 1565 1566 static void test_stream_cred_upd_on_set_rcvlowat(const struct test_opts *opts) 1567 { 1568 test_stream_credit_update_test(opts, false); 1569 } 1570 1571 /* The goal of test leak_acceptq is to stress the race between connect() and 1572 * close(listener). Implementation of client/server loops boils down to: 1573 * 1574 * client server 1575 * ------ ------ 1576 * write(CONTINUE) 1577 * expect(CONTINUE) 1578 * listen() 1579 * write(LISTENING) 1580 * expect(LISTENING) 1581 * connect() close() 1582 */ 1583 #define ACCEPTQ_LEAK_RACE_TIMEOUT 2 /* seconds */ 1584 1585 static void test_stream_leak_acceptq_client(const struct test_opts *opts) 1586 { 1587 time_t tout; 1588 int fd; 1589 1590 tout = current_nsec() + ACCEPTQ_LEAK_RACE_TIMEOUT * NSEC_PER_SEC; 1591 do { 1592 control_writeulong(CONTROL_CONTINUE); 1593 1594 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1595 if (fd >= 0) 1596 close(fd); 1597 } while (current_nsec() < tout); 1598 1599 control_writeulong(CONTROL_DONE); 1600 } 1601 1602 /* Test for a memory leak. User is expected to run kmemleak scan, see README. 
*/ 1603 static void test_stream_leak_acceptq_server(const struct test_opts *opts) 1604 { 1605 int fd; 1606 1607 while (control_readulong() == CONTROL_CONTINUE) { 1608 fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port); 1609 control_writeln("LISTENING"); 1610 close(fd); 1611 } 1612 } 1613 1614 /* Test for a memory leak. User is expected to run kmemleak scan, see README. */ 1615 static void test_stream_msgzcopy_leak_errq_client(const struct test_opts *opts) 1616 { 1617 struct pollfd fds = { 0 }; 1618 int fd; 1619 1620 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1621 if (fd < 0) { 1622 perror("connect"); 1623 exit(EXIT_FAILURE); 1624 } 1625 1626 enable_so_zerocopy_check(fd); 1627 send_byte(fd, 1, MSG_ZEROCOPY); 1628 1629 fds.fd = fd; 1630 fds.events = 0; 1631 if (poll(&fds, 1, -1) < 0) { 1632 perror("poll"); 1633 exit(EXIT_FAILURE); 1634 } 1635 1636 close(fd); 1637 } 1638 1639 static void test_stream_msgzcopy_leak_errq_server(const struct test_opts *opts) 1640 { 1641 int fd; 1642 1643 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1644 if (fd < 0) { 1645 perror("accept"); 1646 exit(EXIT_FAILURE); 1647 } 1648 1649 recv_byte(fd, 1, 0); 1650 vsock_wait_remote_close(fd); 1651 close(fd); 1652 } 1653 1654 /* Test msgzcopy_leak_zcskb is meant to exercise sendmsg() error handling path, 1655 * that might leak an skb. The idea is to fail virtio_transport_init_zcopy_skb() 1656 * by hitting net.core.optmem_max limit in sock_omalloc(), specifically 1657 * 1658 * vsock_connectible_sendmsg 1659 * virtio_transport_stream_enqueue 1660 * virtio_transport_send_pkt_info 1661 * virtio_transport_init_zcopy_skb 1662 * . msg_zerocopy_realloc 1663 * . msg_zerocopy_alloc 1664 * . sock_omalloc 1665 * . sk_omem_alloc + size > sysctl_optmem_max 1666 * return -ENOMEM 1667 * 1668 * We abuse the implementation detail of net/socket.c:____sys_sendmsg(). 
1669 * sk_omem_alloc can be precisely bumped by sock_kmalloc(), as it is used to 1670 * fetch user-provided control data. 1671 * 1672 * While this approach works for now, it relies on assumptions regarding the 1673 * implementation and configuration (for example, order of net.core.optmem_max 1674 * can not exceed MAX_PAGE_ORDER), which may not hold in the future. A more 1675 * resilient testing could be implemented by leveraging the Fault injection 1676 * framework (CONFIG_FAULT_INJECTION), e.g. 1677 * 1678 * client# echo N > /sys/kernel/debug/failslab/ignore-gfp-wait 1679 * client# echo 0 > /sys/kernel/debug/failslab/verbose 1680 * 1681 * void client(const struct test_opts *opts) 1682 * { 1683 * char buf[16]; 1684 * int f, s, i; 1685 * 1686 * f = open("/proc/self/fail-nth", O_WRONLY); 1687 * 1688 * for (i = 1; i < 32; i++) { 1689 * control_writeulong(CONTROL_CONTINUE); 1690 * 1691 * s = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1692 * enable_so_zerocopy_check(s); 1693 * 1694 * sprintf(buf, "%d", i); 1695 * write(f, buf, strlen(buf)); 1696 * 1697 * send(s, &(char){ 0 }, 1, MSG_ZEROCOPY); 1698 * 1699 * write(f, "0", 1); 1700 * close(s); 1701 * } 1702 * 1703 * control_writeulong(CONTROL_DONE); 1704 * close(f); 1705 * } 1706 * 1707 * void server(const struct test_opts *opts) 1708 * { 1709 * int fd; 1710 * 1711 * while (control_readulong() == CONTROL_CONTINUE) { 1712 * fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1713 * vsock_wait_remote_close(fd); 1714 * close(fd); 1715 * } 1716 * } 1717 * 1718 * Refer to Documentation/fault-injection/fault-injection.rst. 1719 */ 1720 #define MAX_PAGE_ORDER 10 /* usually */ 1721 #define PAGE_SIZE 4096 1722 1723 /* Test for a memory leak. User is expected to run kmemleak scan, see README. 
*/ 1724 static void test_stream_msgzcopy_leak_zcskb_client(const struct test_opts *opts) 1725 { 1726 size_t optmem_max, ctl_len, chunk_size; 1727 struct msghdr msg = { 0 }; 1728 struct iovec iov; 1729 char *chunk; 1730 int fd, res; 1731 FILE *f; 1732 1733 f = fopen("/proc/sys/net/core/optmem_max", "r"); 1734 if (!f) { 1735 perror("fopen(optmem_max)"); 1736 exit(EXIT_FAILURE); 1737 } 1738 1739 if (fscanf(f, "%zu", &optmem_max) != 1) { 1740 fprintf(stderr, "fscanf(optmem_max) failed\n"); 1741 exit(EXIT_FAILURE); 1742 } 1743 1744 fclose(f); 1745 1746 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1747 if (fd < 0) { 1748 perror("connect"); 1749 exit(EXIT_FAILURE); 1750 } 1751 1752 enable_so_zerocopy_check(fd); 1753 1754 ctl_len = optmem_max - 1; 1755 if (ctl_len > PAGE_SIZE << MAX_PAGE_ORDER) { 1756 fprintf(stderr, "Try with net.core.optmem_max = 100000\n"); 1757 exit(EXIT_FAILURE); 1758 } 1759 1760 chunk_size = CMSG_SPACE(ctl_len); 1761 chunk = malloc(chunk_size); 1762 if (!chunk) { 1763 perror("malloc"); 1764 exit(EXIT_FAILURE); 1765 } 1766 memset(chunk, 0, chunk_size); 1767 1768 iov.iov_base = &(char){ 0 }; 1769 iov.iov_len = 1; 1770 1771 msg.msg_iov = &iov; 1772 msg.msg_iovlen = 1; 1773 msg.msg_control = chunk; 1774 msg.msg_controllen = ctl_len; 1775 1776 errno = 0; 1777 res = sendmsg(fd, &msg, MSG_ZEROCOPY); 1778 if (res >= 0 || errno != ENOMEM) { 1779 fprintf(stderr, "Expected ENOMEM, got errno=%d res=%d\n", 1780 errno, res); 1781 exit(EXIT_FAILURE); 1782 } 1783 1784 close(fd); 1785 } 1786 1787 static void test_stream_msgzcopy_leak_zcskb_server(const struct test_opts *opts) 1788 { 1789 int fd; 1790 1791 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1792 if (fd < 0) { 1793 perror("accept"); 1794 exit(EXIT_FAILURE); 1795 } 1796 1797 vsock_wait_remote_close(fd); 1798 close(fd); 1799 } 1800 1801 #define MAX_PORT_RETRIES 24 /* net/vmw_vsock/af_vsock.c */ 1802 1803 static bool test_stream_transport_uaf(int cid) 1804 { 1805 int 
sockets[MAX_PORT_RETRIES]; 1806 struct sockaddr_vm addr; 1807 socklen_t alen; 1808 int fd, i, c; 1809 bool ret; 1810 1811 /* Probe for a transport by attempting a local CID bind. Unavailable 1812 * transport (or more specifically: an unsupported transport/CID 1813 * combination) results in EADDRNOTAVAIL, other errnos are fatal. 1814 */ 1815 fd = vsock_bind_try(cid, VMADDR_PORT_ANY, SOCK_STREAM); 1816 if (fd < 0) { 1817 if (errno != EADDRNOTAVAIL) { 1818 perror("Unexpected bind() errno"); 1819 exit(EXIT_FAILURE); 1820 } 1821 1822 return false; 1823 } 1824 1825 alen = sizeof(addr); 1826 if (getsockname(fd, (struct sockaddr *)&addr, &alen)) { 1827 perror("getsockname"); 1828 exit(EXIT_FAILURE); 1829 } 1830 1831 /* Drain the autobind pool; see __vsock_bind_connectible(). */ 1832 for (i = 0; i < MAX_PORT_RETRIES; ++i) 1833 sockets[i] = vsock_bind(cid, ++addr.svm_port, SOCK_STREAM); 1834 1835 close(fd); 1836 1837 /* Setting SOCK_NONBLOCK makes connect() return soon after 1838 * (re-)assigning the transport. We are not connecting to anything 1839 * anyway, so there is no point entering the main loop in 1840 * vsock_connect(); waiting for timeout, checking for signals, etc. 1841 */ 1842 fd = socket(AF_VSOCK, SOCK_STREAM | SOCK_NONBLOCK, 0); 1843 if (fd < 0) { 1844 perror("socket"); 1845 exit(EXIT_FAILURE); 1846 } 1847 1848 /* Assign transport, while failing to autobind. Autobind pool was 1849 * drained, so EADDRNOTAVAIL coming from __vsock_bind_connectible() is 1850 * expected. 1851 * 1852 * One exception is ENODEV which is thrown by vsock_assign_transport(), 1853 * i.e. before vsock_auto_bind(), when the only transport loaded is 1854 * vhost. 
1855 */ 1856 if (!connect(fd, (struct sockaddr *)&addr, alen)) { 1857 fprintf(stderr, "Unexpected connect() success\n"); 1858 exit(EXIT_FAILURE); 1859 } 1860 if (errno == ENODEV && cid == VMADDR_CID_HOST) { 1861 ret = false; 1862 goto cleanup; 1863 } 1864 if (errno != EADDRNOTAVAIL) { 1865 perror("Unexpected connect() errno"); 1866 exit(EXIT_FAILURE); 1867 } 1868 1869 /* Reassign transport, triggering old transport release and 1870 * (potentially) unbinding of an unbound socket. 1871 * 1872 * Vulnerable system may crash now. 1873 */ 1874 for (c = VMADDR_CID_HYPERVISOR; c <= VMADDR_CID_HOST + 1; ++c) { 1875 if (c != cid) { 1876 addr.svm_cid = c; 1877 (void)connect(fd, (struct sockaddr *)&addr, alen); 1878 } 1879 } 1880 1881 ret = true; 1882 cleanup: 1883 close(fd); 1884 while (i--) 1885 close(sockets[i]); 1886 1887 return ret; 1888 } 1889 1890 /* Test attempts to trigger a transport release for an unbound socket. This can 1891 * lead to a reference count mishandling. 1892 */ 1893 static void test_stream_transport_uaf_client(const struct test_opts *opts) 1894 { 1895 bool tested = false; 1896 int cid, tr; 1897 1898 for (cid = VMADDR_CID_HYPERVISOR; cid <= VMADDR_CID_HOST + 1; ++cid) 1899 tested |= test_stream_transport_uaf(cid); 1900 1901 tr = get_transports(); 1902 if (!tr) 1903 fprintf(stderr, "No transports detected\n"); 1904 else if (tr == TRANSPORT_VIRTIO) 1905 fprintf(stderr, "Setup unsupported: sole virtio transport\n"); 1906 else if (!tested) 1907 fprintf(stderr, "No transports tested\n"); 1908 } 1909 1910 static void test_stream_connect_retry_client(const struct test_opts *opts) 1911 { 1912 int fd; 1913 1914 fd = socket(AF_VSOCK, SOCK_STREAM, 0); 1915 if (fd < 0) { 1916 perror("socket"); 1917 exit(EXIT_FAILURE); 1918 } 1919 1920 if (!vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) { 1921 fprintf(stderr, "Unexpected connect() #1 success\n"); 1922 exit(EXIT_FAILURE); 1923 } 1924 1925 control_writeln("LISTEN"); 1926 control_expectln("LISTENING"); 1927 
1928 if (vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) { 1929 perror("connect() #2"); 1930 exit(EXIT_FAILURE); 1931 } 1932 1933 close(fd); 1934 } 1935 1936 static void test_stream_connect_retry_server(const struct test_opts *opts) 1937 { 1938 int fd; 1939 1940 control_expectln("LISTEN"); 1941 1942 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1943 if (fd < 0) { 1944 perror("accept"); 1945 exit(EXIT_FAILURE); 1946 } 1947 1948 vsock_wait_remote_close(fd); 1949 close(fd); 1950 } 1951 1952 #define TRANSPORT_CHANGE_TIMEOUT 2 /* seconds */ 1953 1954 static void *test_stream_transport_change_thread(void *vargp) 1955 { 1956 pid_t *pid = (pid_t *)vargp; 1957 int ret; 1958 1959 ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); 1960 if (ret) { 1961 fprintf(stderr, "pthread_setcanceltype: %d\n", ret); 1962 exit(EXIT_FAILURE); 1963 } 1964 1965 while (true) { 1966 if (kill(*pid, SIGUSR1) < 0) { 1967 perror("kill"); 1968 exit(EXIT_FAILURE); 1969 } 1970 } 1971 return NULL; 1972 } 1973 1974 static void test_transport_change_signal_handler(int signal) 1975 { 1976 /* We need a custom handler for SIGUSR1 as the default one terminates the process. */ 1977 } 1978 1979 static void test_stream_transport_change_client(const struct test_opts *opts) 1980 { 1981 __sighandler_t old_handler; 1982 pid_t pid = getpid(); 1983 pthread_t thread_id; 1984 time_t tout; 1985 int ret, tr; 1986 1987 tr = get_transports(); 1988 1989 /* Print a warning if there is a G2H transport loaded. 1990 * This is on a best effort basis because VMCI can be either G2H and H2G, and there is 1991 * no easy way to understand it. 1992 * The bug we are testing only appears when G2H transports are not loaded. 1993 * This is because `vsock_assign_transport`, when using CID 0, assigns a G2H transport 1994 * to vsk->transport. If none is available it is set to NULL, causing the null-ptr-deref. 1995 */ 1996 if (tr & TRANSPORTS_G2H) 1997 fprintf(stderr, "G2H Transport detected. 
This test will not fail.\n"); 1998 1999 old_handler = signal(SIGUSR1, test_transport_change_signal_handler); 2000 if (old_handler == SIG_ERR) { 2001 perror("signal"); 2002 exit(EXIT_FAILURE); 2003 } 2004 2005 ret = pthread_create(&thread_id, NULL, test_stream_transport_change_thread, &pid); 2006 if (ret) { 2007 fprintf(stderr, "pthread_create: %d\n", ret); 2008 exit(EXIT_FAILURE); 2009 } 2010 2011 control_expectln("LISTENING"); 2012 2013 tout = current_nsec() + TRANSPORT_CHANGE_TIMEOUT * NSEC_PER_SEC; 2014 do { 2015 struct sockaddr_vm sa = { 2016 .svm_family = AF_VSOCK, 2017 .svm_cid = opts->peer_cid, 2018 .svm_port = opts->peer_port, 2019 }; 2020 bool send_control = false; 2021 int s; 2022 2023 s = socket(AF_VSOCK, SOCK_STREAM, 0); 2024 if (s < 0) { 2025 perror("socket"); 2026 exit(EXIT_FAILURE); 2027 } 2028 2029 /* Although setting SO_LINGER does not affect the original test 2030 * for null-ptr-deref, it may trigger a lockdep warning. 2031 */ 2032 enable_so_linger(s, 1); 2033 2034 ret = connect(s, (struct sockaddr *)&sa, sizeof(sa)); 2035 /* The connect can fail due to signals coming from the thread, 2036 * or because the receiver connection queue is full. 2037 * Ignoring also the latter case because there is no way 2038 * of synchronizing client's connect and server's accept when 2039 * connect(s) are constantly being interrupted by signals. 2040 */ 2041 if (ret == -1 && (errno != EINTR && errno != ECONNRESET)) { 2042 perror("connect"); 2043 exit(EXIT_FAILURE); 2044 } 2045 2046 /* Notify the server if the connect() is successful or the 2047 * receiver connection queue is full, so it will do accept() 2048 * to drain it. 2049 */ 2050 if (!ret || errno == ECONNRESET) 2051 send_control = true; 2052 2053 /* Set CID to 0 cause a transport change. 
*/ 2054 sa.svm_cid = 0; 2055 2056 /* There is a case where this will not fail: 2057 * if the previous connect() is interrupted while the 2058 * connection request is already sent, this second 2059 * connect() will wait for the response. 2060 */ 2061 ret = connect(s, (struct sockaddr *)&sa, sizeof(sa)); 2062 if (!ret || errno == ECONNRESET) 2063 send_control = true; 2064 2065 close(s); 2066 2067 if (send_control) 2068 control_writeulong(CONTROL_CONTINUE); 2069 2070 } while (current_nsec() < tout); 2071 2072 control_writeulong(CONTROL_DONE); 2073 2074 ret = pthread_cancel(thread_id); 2075 if (ret) { 2076 fprintf(stderr, "pthread_cancel: %d\n", ret); 2077 exit(EXIT_FAILURE); 2078 } 2079 2080 ret = pthread_join(thread_id, NULL); 2081 if (ret) { 2082 fprintf(stderr, "pthread_join: %d\n", ret); 2083 exit(EXIT_FAILURE); 2084 } 2085 2086 if (signal(SIGUSR1, old_handler) == SIG_ERR) { 2087 perror("signal"); 2088 exit(EXIT_FAILURE); 2089 } 2090 } 2091 2092 static void test_stream_transport_change_server(const struct test_opts *opts) 2093 { 2094 int s = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port); 2095 2096 /* Set the socket to be nonblocking because connects that have been interrupted 2097 * (EINTR) can fill the receiver's accept queue anyway, leading to connect failure. 2098 * As of today (6.15) in such situation there is no way to understand, from the 2099 * client side, if the connection has been queued in the server or not. 2100 */ 2101 if (fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK) < 0) { 2102 perror("fcntl"); 2103 exit(EXIT_FAILURE); 2104 } 2105 control_writeln("LISTENING"); 2106 2107 while (control_readulong() == CONTROL_CONTINUE) { 2108 /* Must accept the connection, otherwise the `listen` 2109 * queue will fill up and new connections will fail. 2110 * There can be more than one queued connection, 2111 * clear them all. 
2112 */ 2113 while (true) { 2114 int client = accept(s, NULL, NULL); 2115 2116 if (client < 0) { 2117 if (errno == EAGAIN) 2118 break; 2119 2120 perror("accept"); 2121 exit(EXIT_FAILURE); 2122 } 2123 2124 close(client); 2125 } 2126 } 2127 2128 close(s); 2129 } 2130 2131 static void test_stream_linger_client(const struct test_opts *opts) 2132 { 2133 int fd; 2134 2135 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 2136 if (fd < 0) { 2137 perror("connect"); 2138 exit(EXIT_FAILURE); 2139 } 2140 2141 enable_so_linger(fd, 1); 2142 close(fd); 2143 } 2144 2145 static void test_stream_linger_server(const struct test_opts *opts) 2146 { 2147 int fd; 2148 2149 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 2150 if (fd < 0) { 2151 perror("accept"); 2152 exit(EXIT_FAILURE); 2153 } 2154 2155 vsock_wait_remote_close(fd); 2156 close(fd); 2157 } 2158 2159 /* Half of the default to not risk timing out the control channel */ 2160 #define LINGER_TIMEOUT (TIMEOUT / 2) 2161 2162 static void test_stream_nolinger_client(const struct test_opts *opts) 2163 { 2164 bool waited; 2165 time_t ns; 2166 int fd; 2167 2168 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 2169 if (fd < 0) { 2170 perror("connect"); 2171 exit(EXIT_FAILURE); 2172 } 2173 2174 enable_so_linger(fd, LINGER_TIMEOUT); 2175 send_byte(fd, 1, 0); /* Left unread to expose incorrect behaviour. 
*/ 2176 waited = vsock_wait_sent(fd); 2177 2178 ns = current_nsec(); 2179 close(fd); 2180 ns = current_nsec() - ns; 2181 2182 if (!waited) { 2183 fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n"); 2184 } else if (DIV_ROUND_UP(ns, NSEC_PER_SEC) >= LINGER_TIMEOUT) { 2185 fprintf(stderr, "Unexpected lingering\n"); 2186 exit(EXIT_FAILURE); 2187 } 2188 2189 control_writeln("DONE"); 2190 } 2191 2192 static void test_stream_nolinger_server(const struct test_opts *opts) 2193 { 2194 int fd; 2195 2196 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 2197 if (fd < 0) { 2198 perror("accept"); 2199 exit(EXIT_FAILURE); 2200 } 2201 2202 control_expectln("DONE"); 2203 close(fd); 2204 } 2205 2206 static void test_stream_accepted_setsockopt_client(const struct test_opts *opts) 2207 { 2208 int fd; 2209 2210 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 2211 if (fd < 0) { 2212 perror("connect"); 2213 exit(EXIT_FAILURE); 2214 } 2215 2216 close(fd); 2217 } 2218 2219 static void test_stream_accepted_setsockopt_server(const struct test_opts *opts) 2220 { 2221 int fd; 2222 2223 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 2224 if (fd < 0) { 2225 perror("accept"); 2226 exit(EXIT_FAILURE); 2227 } 2228 2229 enable_so_zerocopy_check(fd); 2230 close(fd); 2231 } 2232 2233 static struct test_case test_cases[] = { 2234 { 2235 .name = "SOCK_STREAM connection reset", 2236 .run_client = test_stream_connection_reset, 2237 }, 2238 { 2239 .name = "SOCK_STREAM bind only", 2240 .run_client = test_stream_bind_only_client, 2241 .run_server = test_stream_bind_only_server, 2242 }, 2243 { 2244 .name = "SOCK_STREAM client close", 2245 .run_client = test_stream_client_close_client, 2246 .run_server = test_stream_client_close_server, 2247 }, 2248 { 2249 .name = "SOCK_STREAM server close", 2250 .run_client = test_stream_server_close_client, 2251 .run_server = test_stream_server_close_server, 2252 }, 2253 { 2254 .name = "SOCK_STREAM multiple connections", 
2255 .run_client = test_stream_multiconn_client, 2256 .run_server = test_stream_multiconn_server, 2257 }, 2258 { 2259 .name = "SOCK_STREAM MSG_PEEK", 2260 .run_client = test_stream_msg_peek_client, 2261 .run_server = test_stream_msg_peek_server, 2262 }, 2263 { 2264 .name = "SOCK_SEQPACKET msg bounds", 2265 .run_client = test_seqpacket_msg_bounds_client, 2266 .run_server = test_seqpacket_msg_bounds_server, 2267 }, 2268 { 2269 .name = "SOCK_SEQPACKET MSG_TRUNC flag", 2270 .run_client = test_seqpacket_msg_trunc_client, 2271 .run_server = test_seqpacket_msg_trunc_server, 2272 }, 2273 { 2274 .name = "SOCK_SEQPACKET timeout", 2275 .run_client = test_seqpacket_timeout_client, 2276 .run_server = test_seqpacket_timeout_server, 2277 }, 2278 { 2279 .name = "SOCK_SEQPACKET invalid receive buffer", 2280 .run_client = test_seqpacket_invalid_rec_buffer_client, 2281 .run_server = test_seqpacket_invalid_rec_buffer_server, 2282 }, 2283 { 2284 .name = "SOCK_STREAM poll() + SO_RCVLOWAT", 2285 .run_client = test_stream_poll_rcvlowat_client, 2286 .run_server = test_stream_poll_rcvlowat_server, 2287 }, 2288 { 2289 .name = "SOCK_SEQPACKET big message", 2290 .run_client = test_seqpacket_bigmsg_client, 2291 .run_server = test_seqpacket_bigmsg_server, 2292 }, 2293 { 2294 .name = "SOCK_STREAM test invalid buffer", 2295 .run_client = test_stream_inv_buf_client, 2296 .run_server = test_stream_inv_buf_server, 2297 }, 2298 { 2299 .name = "SOCK_SEQPACKET test invalid buffer", 2300 .run_client = test_seqpacket_inv_buf_client, 2301 .run_server = test_seqpacket_inv_buf_server, 2302 }, 2303 { 2304 .name = "SOCK_STREAM virtio skb merge", 2305 .run_client = test_stream_virtio_skb_merge_client, 2306 .run_server = test_stream_virtio_skb_merge_server, 2307 }, 2308 { 2309 .name = "SOCK_SEQPACKET MSG_PEEK", 2310 .run_client = test_seqpacket_msg_peek_client, 2311 .run_server = test_seqpacket_msg_peek_server, 2312 }, 2313 { 2314 .name = "SOCK_STREAM SHUT_WR", 2315 .run_client = test_stream_shutwr_client, 2316 
.run_server = test_stream_shutwr_server, 2317 }, 2318 { 2319 .name = "SOCK_STREAM SHUT_RD", 2320 .run_client = test_stream_shutrd_client, 2321 .run_server = test_stream_shutrd_server, 2322 }, 2323 { 2324 .name = "SOCK_STREAM MSG_ZEROCOPY", 2325 .run_client = test_stream_msgzcopy_client, 2326 .run_server = test_stream_msgzcopy_server, 2327 }, 2328 { 2329 .name = "SOCK_SEQPACKET MSG_ZEROCOPY", 2330 .run_client = test_seqpacket_msgzcopy_client, 2331 .run_server = test_seqpacket_msgzcopy_server, 2332 }, 2333 { 2334 .name = "SOCK_STREAM MSG_ZEROCOPY empty MSG_ERRQUEUE", 2335 .run_client = test_stream_msgzcopy_empty_errq_client, 2336 .run_server = test_stream_msgzcopy_empty_errq_server, 2337 }, 2338 { 2339 .name = "SOCK_STREAM double bind connect", 2340 .run_client = test_double_bind_connect_client, 2341 .run_server = test_double_bind_connect_server, 2342 }, 2343 { 2344 .name = "SOCK_STREAM virtio credit update + SO_RCVLOWAT", 2345 .run_client = test_stream_rcvlowat_def_cred_upd_client, 2346 .run_server = test_stream_cred_upd_on_set_rcvlowat, 2347 }, 2348 { 2349 .name = "SOCK_STREAM virtio credit update + low rx_bytes", 2350 .run_client = test_stream_rcvlowat_def_cred_upd_client, 2351 .run_server = test_stream_cred_upd_on_low_rx_bytes, 2352 }, 2353 { 2354 .name = "SOCK_STREAM ioctl(SIOCOUTQ) 0 unsent bytes", 2355 .run_client = test_stream_unsent_bytes_client, 2356 .run_server = test_stream_unsent_bytes_server, 2357 }, 2358 { 2359 .name = "SOCK_SEQPACKET ioctl(SIOCOUTQ) 0 unsent bytes", 2360 .run_client = test_seqpacket_unsent_bytes_client, 2361 .run_server = test_seqpacket_unsent_bytes_server, 2362 }, 2363 { 2364 .name = "SOCK_STREAM leak accept queue", 2365 .run_client = test_stream_leak_acceptq_client, 2366 .run_server = test_stream_leak_acceptq_server, 2367 }, 2368 { 2369 .name = "SOCK_STREAM MSG_ZEROCOPY leak MSG_ERRQUEUE", 2370 .run_client = test_stream_msgzcopy_leak_errq_client, 2371 .run_server = test_stream_msgzcopy_leak_errq_server, 2372 }, 2373 { 2374 .name = 
"SOCK_STREAM MSG_ZEROCOPY leak completion skb", 2375 .run_client = test_stream_msgzcopy_leak_zcskb_client, 2376 .run_server = test_stream_msgzcopy_leak_zcskb_server, 2377 }, 2378 { 2379 .name = "SOCK_STREAM transport release use-after-free", 2380 .run_client = test_stream_transport_uaf_client, 2381 }, 2382 { 2383 .name = "SOCK_STREAM retry failed connect()", 2384 .run_client = test_stream_connect_retry_client, 2385 .run_server = test_stream_connect_retry_server, 2386 }, 2387 { 2388 .name = "SOCK_STREAM SO_LINGER null-ptr-deref", 2389 .run_client = test_stream_linger_client, 2390 .run_server = test_stream_linger_server, 2391 }, 2392 { 2393 .name = "SOCK_STREAM SO_LINGER close() on unread", 2394 .run_client = test_stream_nolinger_client, 2395 .run_server = test_stream_nolinger_server, 2396 }, 2397 { 2398 .name = "SOCK_STREAM transport change null-ptr-deref, lockdep warn", 2399 .run_client = test_stream_transport_change_client, 2400 .run_server = test_stream_transport_change_server, 2401 }, 2402 { 2403 .name = "SOCK_STREAM ioctl(SIOCINQ) functionality", 2404 .run_client = test_stream_unread_bytes_client, 2405 .run_server = test_stream_unread_bytes_server, 2406 }, 2407 { 2408 .name = "SOCK_SEQPACKET ioctl(SIOCINQ) functionality", 2409 .run_client = test_seqpacket_unread_bytes_client, 2410 .run_server = test_seqpacket_unread_bytes_server, 2411 }, 2412 { 2413 .name = "SOCK_STREAM accept()ed socket custom setsockopt()", 2414 .run_client = test_stream_accepted_setsockopt_client, 2415 .run_server = test_stream_accepted_setsockopt_server, 2416 }, 2417 { 2418 .name = "SOCK_STREAM virtio MSG_ZEROCOPY coalescence corruption", 2419 .run_client = test_stream_msgzcopy_mangle_client, 2420 .run_server = test_stream_msgzcopy_mangle_server, 2421 }, 2422 {}, 2423 }; 2424 2425 static const char optstring[] = ""; 2426 static const struct option longopts[] = { 2427 { 2428 .name = "control-host", 2429 .has_arg = required_argument, 2430 .val = 'H', 2431 }, 2432 { 2433 .name = 
"control-port", 2434 .has_arg = required_argument, 2435 .val = 'P', 2436 }, 2437 { 2438 .name = "mode", 2439 .has_arg = required_argument, 2440 .val = 'm', 2441 }, 2442 { 2443 .name = "peer-cid", 2444 .has_arg = required_argument, 2445 .val = 'p', 2446 }, 2447 { 2448 .name = "peer-port", 2449 .has_arg = required_argument, 2450 .val = 'q', 2451 }, 2452 { 2453 .name = "list", 2454 .has_arg = no_argument, 2455 .val = 'l', 2456 }, 2457 { 2458 .name = "skip", 2459 .has_arg = required_argument, 2460 .val = 's', 2461 }, 2462 { 2463 .name = "pick", 2464 .has_arg = required_argument, 2465 .val = 't', 2466 }, 2467 { 2468 .name = "help", 2469 .has_arg = no_argument, 2470 .val = '?', 2471 }, 2472 {}, 2473 }; 2474 2475 static void usage(void) 2476 { 2477 fprintf(stderr, "Usage: vsock_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--peer-port=<port>] [--list] [--skip=<test_id>]\n" 2478 "\n" 2479 " Server: vsock_test --control-port=1234 --mode=server --peer-cid=3\n" 2480 " Client: vsock_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n" 2481 "\n" 2482 "Run vsock.ko tests. Must be launched in both guest\n" 2483 "and host. One side must use --mode=client and\n" 2484 "the other side must use --mode=server.\n" 2485 "\n" 2486 "A TCP control socket connection is used to coordinate tests\n" 2487 "between the client and the server. 
The server requires a\n" 2488 "listen address and the client requires an address to\n" 2489 "connect to.\n" 2490 "\n" 2491 "The CID of the other side must be given with --peer-cid=<cid>.\n" 2492 "During the test, two AF_VSOCK ports will be used: the port\n" 2493 "specified with --peer-port=<port> (or the default port)\n" 2494 "and the next one.\n" 2495 "\n" 2496 "Options:\n" 2497 " --help This help message\n" 2498 " --control-host <host> Server IP address to connect to\n" 2499 " --control-port <port> Server port to listen on/connect to\n" 2500 " --mode client|server Server or client mode\n" 2501 " --peer-cid <cid> CID of the other side\n" 2502 " --peer-port <port> AF_VSOCK port used for the test [default: %d]\n" 2503 " --list List of tests that will be executed\n" 2504 " --pick <test_id> Test ID to execute selectively;\n" 2505 " use multiple --pick options to select more tests\n" 2506 " --skip <test_id> Test ID to skip;\n" 2507 " use multiple --skip options to skip more tests\n", 2508 DEFAULT_PEER_PORT 2509 ); 2510 exit(EXIT_FAILURE); 2511 } 2512 2513 int main(int argc, char **argv) 2514 { 2515 const char *control_host = NULL; 2516 const char *control_port = NULL; 2517 struct test_opts opts = { 2518 .mode = TEST_MODE_UNSET, 2519 .peer_cid = VMADDR_CID_ANY, 2520 .peer_port = DEFAULT_PEER_PORT, 2521 }; 2522 2523 srand(time(NULL)); 2524 init_signals(); 2525 2526 for (;;) { 2527 int opt = getopt_long(argc, argv, optstring, longopts, NULL); 2528 2529 if (opt == -1) 2530 break; 2531 2532 switch (opt) { 2533 case 'H': 2534 control_host = optarg; 2535 break; 2536 case 'm': 2537 if (strcmp(optarg, "client") == 0) 2538 opts.mode = TEST_MODE_CLIENT; 2539 else if (strcmp(optarg, "server") == 0) 2540 opts.mode = TEST_MODE_SERVER; 2541 else { 2542 fprintf(stderr, "--mode must be \"client\" or \"server\"\n"); 2543 return EXIT_FAILURE; 2544 } 2545 break; 2546 case 'p': 2547 opts.peer_cid = parse_cid(optarg); 2548 break; 2549 case 'q': 2550 opts.peer_port = parse_port(optarg); 2551 
break; 2552 case 'P': 2553 control_port = optarg; 2554 break; 2555 case 'l': 2556 list_tests(test_cases); 2557 break; 2558 case 's': 2559 skip_test(test_cases, ARRAY_SIZE(test_cases) - 1, 2560 optarg); 2561 break; 2562 case 't': 2563 pick_test(test_cases, ARRAY_SIZE(test_cases) - 1, 2564 optarg); 2565 break; 2566 case '?': 2567 default: 2568 usage(); 2569 } 2570 } 2571 2572 if (!control_port) 2573 usage(); 2574 if (opts.mode == TEST_MODE_UNSET) 2575 usage(); 2576 if (opts.peer_cid == VMADDR_CID_ANY) 2577 usage(); 2578 2579 if (!control_host) { 2580 if (opts.mode != TEST_MODE_SERVER) 2581 usage(); 2582 control_host = "0.0.0.0"; 2583 } 2584 2585 control_init(control_host, control_port, 2586 opts.mode == TEST_MODE_SERVER); 2587 2588 run_tests(test_cases, &opts); 2589 2590 control_cleanup(); 2591 return EXIT_SUCCESS; 2592 } 2593