1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * vsock_test - vsock.ko test suite 4 * 5 * Copyright (C) 2017 Red Hat, Inc. 6 * 7 * Author: Stefan Hajnoczi <stefanha@redhat.com> 8 */ 9 10 #include <getopt.h> 11 #include <stdio.h> 12 #include <stdlib.h> 13 #include <string.h> 14 #include <errno.h> 15 #include <unistd.h> 16 #include <linux/kernel.h> 17 #include <sys/types.h> 18 #include <sys/socket.h> 19 #include <time.h> 20 #include <sys/mman.h> 21 #include <poll.h> 22 #include <signal.h> 23 #include <sys/ioctl.h> 24 #include <linux/time64.h> 25 #include <pthread.h> 26 #include <fcntl.h> 27 #include <linux/sockios.h> 28 29 #include "vsock_test_zerocopy.h" 30 #include "timeout.h" 31 #include "control.h" 32 #include "util.h" 33 34 /* Basic messages for control_writeulong(), control_readulong() */ 35 #define CONTROL_CONTINUE 1 36 #define CONTROL_DONE 0 37 38 static void test_stream_connection_reset(const struct test_opts *opts) 39 { 40 union { 41 struct sockaddr sa; 42 struct sockaddr_vm svm; 43 } addr = { 44 .svm = { 45 .svm_family = AF_VSOCK, 46 .svm_port = opts->peer_port, 47 .svm_cid = opts->peer_cid, 48 }, 49 }; 50 int ret; 51 int fd; 52 53 fd = socket(AF_VSOCK, SOCK_STREAM, 0); 54 55 timeout_begin(TIMEOUT); 56 do { 57 ret = connect(fd, &addr.sa, sizeof(addr.svm)); 58 timeout_check("connect"); 59 } while (ret < 0 && errno == EINTR); 60 timeout_end(); 61 62 if (ret != -1) { 63 fprintf(stderr, "expected connect(2) failure, got %d\n", ret); 64 exit(EXIT_FAILURE); 65 } 66 if (errno != ECONNRESET) { 67 fprintf(stderr, "unexpected connect(2) errno %d\n", errno); 68 exit(EXIT_FAILURE); 69 } 70 71 close(fd); 72 } 73 74 static void test_stream_bind_only_client(const struct test_opts *opts) 75 { 76 union { 77 struct sockaddr sa; 78 struct sockaddr_vm svm; 79 } addr = { 80 .svm = { 81 .svm_family = AF_VSOCK, 82 .svm_port = opts->peer_port, 83 .svm_cid = opts->peer_cid, 84 }, 85 }; 86 int ret; 87 int fd; 88 89 /* Wait for the server to be ready */ 90 
control_expectln("BIND"); 91 92 fd = socket(AF_VSOCK, SOCK_STREAM, 0); 93 94 timeout_begin(TIMEOUT); 95 do { 96 ret = connect(fd, &addr.sa, sizeof(addr.svm)); 97 timeout_check("connect"); 98 } while (ret < 0 && errno == EINTR); 99 timeout_end(); 100 101 if (ret != -1) { 102 fprintf(stderr, "expected connect(2) failure, got %d\n", ret); 103 exit(EXIT_FAILURE); 104 } 105 if (errno != ECONNRESET) { 106 fprintf(stderr, "unexpected connect(2) errno %d\n", errno); 107 exit(EXIT_FAILURE); 108 } 109 110 /* Notify the server that the client has finished */ 111 control_writeln("DONE"); 112 113 close(fd); 114 } 115 116 static void test_stream_bind_only_server(const struct test_opts *opts) 117 { 118 int fd; 119 120 fd = vsock_bind(VMADDR_CID_ANY, opts->peer_port, SOCK_STREAM); 121 122 /* Notify the client that the server is ready */ 123 control_writeln("BIND"); 124 125 /* Wait for the client to finish */ 126 control_expectln("DONE"); 127 128 close(fd); 129 } 130 131 static void test_stream_client_close_client(const struct test_opts *opts) 132 { 133 int fd; 134 135 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 136 if (fd < 0) { 137 perror("connect"); 138 exit(EXIT_FAILURE); 139 } 140 141 send_byte(fd, 1, 0); 142 close(fd); 143 } 144 145 static void test_stream_client_close_server(const struct test_opts *opts) 146 { 147 int fd; 148 149 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 150 if (fd < 0) { 151 perror("accept"); 152 exit(EXIT_FAILURE); 153 } 154 155 /* Wait for the remote to close the connection, before check 156 * -EPIPE error on send. 
157 */ 158 vsock_wait_remote_close(fd); 159 160 send_byte(fd, -EPIPE, 0); 161 recv_byte(fd, 1, 0); 162 recv_byte(fd, 0, 0); 163 close(fd); 164 } 165 166 static void test_stream_server_close_client(const struct test_opts *opts) 167 { 168 int fd; 169 170 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 171 if (fd < 0) { 172 perror("connect"); 173 exit(EXIT_FAILURE); 174 } 175 176 /* Wait for the remote to close the connection, before check 177 * -EPIPE error on send. 178 */ 179 vsock_wait_remote_close(fd); 180 181 send_byte(fd, -EPIPE, 0); 182 recv_byte(fd, 1, 0); 183 recv_byte(fd, 0, 0); 184 close(fd); 185 } 186 187 static void test_stream_server_close_server(const struct test_opts *opts) 188 { 189 int fd; 190 191 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 192 if (fd < 0) { 193 perror("accept"); 194 exit(EXIT_FAILURE); 195 } 196 197 send_byte(fd, 1, 0); 198 close(fd); 199 } 200 201 /* With the standard socket sizes, VMCI is able to support about 100 202 * concurrent stream connections. 
203 */ 204 #define MULTICONN_NFDS 100 205 206 static void test_stream_multiconn_client(const struct test_opts *opts) 207 { 208 int fds[MULTICONN_NFDS]; 209 int i; 210 211 for (i = 0; i < MULTICONN_NFDS; i++) { 212 fds[i] = vsock_stream_connect(opts->peer_cid, opts->peer_port); 213 if (fds[i] < 0) { 214 perror("connect"); 215 exit(EXIT_FAILURE); 216 } 217 } 218 219 for (i = 0; i < MULTICONN_NFDS; i++) { 220 if (i % 2) 221 recv_byte(fds[i], 1, 0); 222 else 223 send_byte(fds[i], 1, 0); 224 } 225 226 for (i = 0; i < MULTICONN_NFDS; i++) 227 close(fds[i]); 228 } 229 230 static void test_stream_multiconn_server(const struct test_opts *opts) 231 { 232 int fds[MULTICONN_NFDS]; 233 int i; 234 235 for (i = 0; i < MULTICONN_NFDS; i++) { 236 fds[i] = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 237 if (fds[i] < 0) { 238 perror("accept"); 239 exit(EXIT_FAILURE); 240 } 241 } 242 243 for (i = 0; i < MULTICONN_NFDS; i++) { 244 if (i % 2) 245 send_byte(fds[i], 1, 0); 246 else 247 recv_byte(fds[i], 1, 0); 248 } 249 250 for (i = 0; i < MULTICONN_NFDS; i++) 251 close(fds[i]); 252 } 253 254 #define MSG_PEEK_BUF_LEN 64 255 256 static void test_msg_peek_client(const struct test_opts *opts, 257 bool seqpacket) 258 { 259 unsigned char buf[MSG_PEEK_BUF_LEN]; 260 int fd; 261 int i; 262 263 if (seqpacket) 264 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 265 else 266 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 267 268 if (fd < 0) { 269 perror("connect"); 270 exit(EXIT_FAILURE); 271 } 272 273 for (i = 0; i < sizeof(buf); i++) 274 buf[i] = rand() & 0xFF; 275 276 control_expectln("SRVREADY"); 277 278 send_buf(fd, buf, sizeof(buf), 0, sizeof(buf)); 279 280 close(fd); 281 } 282 283 static void test_msg_peek_server(const struct test_opts *opts, 284 bool seqpacket) 285 { 286 unsigned char buf_half[MSG_PEEK_BUF_LEN / 2]; 287 unsigned char buf_normal[MSG_PEEK_BUF_LEN]; 288 unsigned char buf_peek[MSG_PEEK_BUF_LEN]; 289 int fd; 290 291 if (seqpacket) 292 
fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 293 else 294 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 295 296 if (fd < 0) { 297 perror("accept"); 298 exit(EXIT_FAILURE); 299 } 300 301 /* Peek from empty socket. */ 302 recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK | MSG_DONTWAIT, 303 -EAGAIN); 304 305 control_writeln("SRVREADY"); 306 307 /* Peek part of data. */ 308 recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK, sizeof(buf_half)); 309 310 /* Peek whole data. */ 311 recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK, sizeof(buf_peek)); 312 313 /* Compare partial and full peek. */ 314 if (memcmp(buf_half, buf_peek, sizeof(buf_half))) { 315 fprintf(stderr, "Partial peek data mismatch\n"); 316 exit(EXIT_FAILURE); 317 } 318 319 if (seqpacket) { 320 /* This type of socket supports MSG_TRUNC flag, 321 * so check it with MSG_PEEK. We must get length 322 * of the message. 323 */ 324 recv_buf(fd, buf_half, sizeof(buf_half), MSG_PEEK | MSG_TRUNC, 325 sizeof(buf_peek)); 326 } 327 328 recv_buf(fd, buf_normal, sizeof(buf_normal), 0, sizeof(buf_normal)); 329 330 /* Compare full peek and normal read. 
*/ 331 if (memcmp(buf_peek, buf_normal, sizeof(buf_peek))) { 332 fprintf(stderr, "Full peek data mismatch\n"); 333 exit(EXIT_FAILURE); 334 } 335 336 close(fd); 337 } 338 339 static void test_stream_msg_peek_client(const struct test_opts *opts) 340 { 341 return test_msg_peek_client(opts, false); 342 } 343 344 static void test_stream_msg_peek_server(const struct test_opts *opts) 345 { 346 return test_msg_peek_server(opts, false); 347 } 348 349 static void test_stream_peek_after_recv_server(const struct test_opts *opts) 350 { 351 unsigned char buf_normal[MSG_PEEK_BUF_LEN]; 352 unsigned char buf_peek[MSG_PEEK_BUF_LEN]; 353 int fd; 354 355 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 356 if (fd < 0) { 357 perror("accept"); 358 exit(EXIT_FAILURE); 359 } 360 361 control_writeln("SRVREADY"); 362 363 /* Partial recv to advance offset within the skb */ 364 recv_buf(fd, buf_normal, 1, 0, 1); 365 366 /* Peek with a buffer larger than the remaining data */ 367 recv_buf(fd, buf_peek, sizeof(buf_peek), MSG_PEEK, sizeof(buf_peek) - 1); 368 369 /* Consume the remaining data */ 370 recv_buf(fd, buf_normal, sizeof(buf_normal) - 1, 0, sizeof(buf_normal) - 1); 371 372 /* Compare full peek and normal read. 
*/ 373 if (memcmp(buf_peek, buf_normal, sizeof(buf_peek) - 1)) { 374 fprintf(stderr, "Full peek data mismatch\n"); 375 exit(EXIT_FAILURE); 376 } 377 378 close(fd); 379 } 380 381 #define SOCK_BUF_SIZE (2 * 1024 * 1024) 382 #define SOCK_BUF_SIZE_SMALL (64 * 1024) 383 #define MAX_MSG_PAGES 4 384 385 static void test_seqpacket_msg_bounds_client(const struct test_opts *opts) 386 { 387 unsigned long long sock_buf_size; 388 unsigned long curr_hash; 389 size_t max_msg_size; 390 int page_size; 391 int msg_count; 392 int fd; 393 394 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 395 if (fd < 0) { 396 perror("connect"); 397 exit(EXIT_FAILURE); 398 } 399 400 sock_buf_size = SOCK_BUF_SIZE; 401 402 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE, 403 sock_buf_size, 404 "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)"); 405 406 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, 407 sock_buf_size, 408 "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); 409 410 /* Wait, until receiver sets buffer size. */ 411 control_expectln("SRVREADY"); 412 413 curr_hash = 0; 414 page_size = getpagesize(); 415 max_msg_size = MAX_MSG_PAGES * page_size; 416 msg_count = SOCK_BUF_SIZE / max_msg_size; 417 418 for (int i = 0; i < msg_count; i++) { 419 size_t buf_size; 420 int flags; 421 void *buf; 422 423 /* Use "small" buffers and "big" buffers. */ 424 if (i & 1) 425 buf_size = page_size + 426 (rand() % (max_msg_size - page_size)); 427 else 428 buf_size = 1 + (rand() % page_size); 429 430 buf = malloc(buf_size); 431 432 if (!buf) { 433 perror("malloc"); 434 exit(EXIT_FAILURE); 435 } 436 437 memset(buf, rand() & 0xff, buf_size); 438 /* Set at least one MSG_EOR + some random. 
*/ 439 if (i == (msg_count / 2) || (rand() & 1)) { 440 flags = MSG_EOR; 441 curr_hash++; 442 } else { 443 flags = 0; 444 } 445 446 send_buf(fd, buf, buf_size, flags, buf_size); 447 448 /* 449 * Hash sum is computed at both client and server in 450 * the same way: 451 * H += hash('message data') 452 * Such hash "controls" both data integrity and message 453 * bounds. After data exchange, both sums are compared 454 * using control socket, and if message bounds wasn't 455 * broken - two values must be equal. 456 */ 457 curr_hash += hash_djb2(buf, buf_size); 458 free(buf); 459 } 460 461 control_writeln("SENDDONE"); 462 control_writeulong(curr_hash); 463 close(fd); 464 } 465 466 static void test_seqpacket_msg_bounds_server(const struct test_opts *opts) 467 { 468 unsigned long long sock_buf_size; 469 unsigned long remote_hash; 470 unsigned long curr_hash; 471 int fd; 472 struct msghdr msg = {0}; 473 struct iovec iov = {0}; 474 475 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 476 if (fd < 0) { 477 perror("accept"); 478 exit(EXIT_FAILURE); 479 } 480 481 sock_buf_size = SOCK_BUF_SIZE; 482 483 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE, 484 sock_buf_size, 485 "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)"); 486 487 setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, 488 sock_buf_size, 489 "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); 490 491 /* Ready to receive data. */ 492 control_writeln("SRVREADY"); 493 /* Wait, until peer sends whole data. 
*/ 494 control_expectln("SENDDONE"); 495 iov.iov_len = MAX_MSG_PAGES * getpagesize(); 496 iov.iov_base = malloc(iov.iov_len); 497 if (!iov.iov_base) { 498 perror("malloc"); 499 exit(EXIT_FAILURE); 500 } 501 502 msg.msg_iov = &iov; 503 msg.msg_iovlen = 1; 504 505 curr_hash = 0; 506 507 while (1) { 508 ssize_t recv_size; 509 510 recv_size = recvmsg(fd, &msg, 0); 511 512 if (!recv_size) 513 break; 514 515 if (recv_size < 0) { 516 perror("recvmsg"); 517 exit(EXIT_FAILURE); 518 } 519 520 if (msg.msg_flags & MSG_EOR) 521 curr_hash++; 522 523 curr_hash += hash_djb2(msg.msg_iov[0].iov_base, recv_size); 524 } 525 526 free(iov.iov_base); 527 close(fd); 528 remote_hash = control_readulong(); 529 530 if (curr_hash != remote_hash) { 531 fprintf(stderr, "Message bounds broken\n"); 532 exit(EXIT_FAILURE); 533 } 534 } 535 536 #define MESSAGE_TRUNC_SZ 32 537 static void test_seqpacket_msg_trunc_client(const struct test_opts *opts) 538 { 539 int fd; 540 char buf[MESSAGE_TRUNC_SZ]; 541 542 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 543 if (fd < 0) { 544 perror("connect"); 545 exit(EXIT_FAILURE); 546 } 547 548 send_buf(fd, buf, sizeof(buf), 0, sizeof(buf)); 549 550 control_writeln("SENDDONE"); 551 close(fd); 552 } 553 554 static void test_seqpacket_msg_trunc_server(const struct test_opts *opts) 555 { 556 int fd; 557 char buf[MESSAGE_TRUNC_SZ / 2]; 558 struct msghdr msg = {0}; 559 struct iovec iov = {0}; 560 561 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 562 if (fd < 0) { 563 perror("accept"); 564 exit(EXIT_FAILURE); 565 } 566 567 control_expectln("SENDDONE"); 568 iov.iov_base = buf; 569 iov.iov_len = sizeof(buf); 570 msg.msg_iov = &iov; 571 msg.msg_iovlen = 1; 572 573 ssize_t ret = recvmsg(fd, &msg, MSG_TRUNC); 574 575 if (ret != MESSAGE_TRUNC_SZ) { 576 printf("%zi\n", ret); 577 perror("MSG_TRUNC doesn't work"); 578 exit(EXIT_FAILURE); 579 } 580 581 if (!(msg.msg_flags & MSG_TRUNC)) { 582 fprintf(stderr, "MSG_TRUNC expected\n"); 583 
exit(EXIT_FAILURE); 584 } 585 586 close(fd); 587 } 588 589 static time_t current_nsec(void) 590 { 591 struct timespec ts; 592 593 if (clock_gettime(CLOCK_REALTIME, &ts)) { 594 perror("clock_gettime(3) failed"); 595 exit(EXIT_FAILURE); 596 } 597 598 return (ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec; 599 } 600 601 #define RCVTIMEO_TIMEOUT_SEC 1 602 #define READ_OVERHEAD_NSEC 250000000 /* 0.25 sec */ 603 604 static void test_seqpacket_timeout_client(const struct test_opts *opts) 605 { 606 int fd; 607 struct timeval tv; 608 char dummy; 609 time_t read_enter_ns; 610 time_t read_overhead_ns; 611 612 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 613 if (fd < 0) { 614 perror("connect"); 615 exit(EXIT_FAILURE); 616 } 617 618 tv.tv_sec = RCVTIMEO_TIMEOUT_SEC; 619 tv.tv_usec = 0; 620 621 setsockopt_timeval_check(fd, SOL_SOCKET, SO_RCVTIMEO, tv, 622 "setsockopt(SO_RCVTIMEO)"); 623 624 read_enter_ns = current_nsec(); 625 626 if (read(fd, &dummy, sizeof(dummy)) != -1) { 627 fprintf(stderr, 628 "expected 'dummy' read(2) failure\n"); 629 exit(EXIT_FAILURE); 630 } 631 632 if (errno != EAGAIN) { 633 perror("EAGAIN expected"); 634 exit(EXIT_FAILURE); 635 } 636 637 read_overhead_ns = current_nsec() - read_enter_ns - 638 NSEC_PER_SEC * RCVTIMEO_TIMEOUT_SEC; 639 640 if (read_overhead_ns > READ_OVERHEAD_NSEC) { 641 fprintf(stderr, 642 "too much time in read(2), %lu > %i ns\n", 643 read_overhead_ns, READ_OVERHEAD_NSEC); 644 exit(EXIT_FAILURE); 645 } 646 647 control_writeln("WAITDONE"); 648 close(fd); 649 } 650 651 static void test_seqpacket_timeout_server(const struct test_opts *opts) 652 { 653 int fd; 654 655 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 656 if (fd < 0) { 657 perror("accept"); 658 exit(EXIT_FAILURE); 659 } 660 661 control_expectln("WAITDONE"); 662 close(fd); 663 } 664 665 static void test_seqpacket_bigmsg_client(const struct test_opts *opts) 666 { 667 unsigned long long sock_buf_size; 668 size_t buf_size; 669 socklen_t len; 670 void 
*data; 671 int fd; 672 673 len = sizeof(sock_buf_size); 674 675 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 676 if (fd < 0) { 677 perror("connect"); 678 exit(EXIT_FAILURE); 679 } 680 681 if (getsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, 682 &sock_buf_size, &len)) { 683 perror("getsockopt"); 684 exit(EXIT_FAILURE); 685 } 686 687 sock_buf_size++; 688 689 /* size_t can be < unsigned long long */ 690 buf_size = (size_t)sock_buf_size; 691 if (buf_size != sock_buf_size) { 692 fprintf(stderr, "Returned BUFFER_SIZE too large\n"); 693 exit(EXIT_FAILURE); 694 } 695 696 data = malloc(buf_size); 697 if (!data) { 698 perror("malloc"); 699 exit(EXIT_FAILURE); 700 } 701 702 send_buf(fd, data, buf_size, 0, -EMSGSIZE); 703 704 control_writeln("CLISENT"); 705 706 free(data); 707 close(fd); 708 } 709 710 static void test_seqpacket_bigmsg_server(const struct test_opts *opts) 711 { 712 int fd; 713 714 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 715 if (fd < 0) { 716 perror("accept"); 717 exit(EXIT_FAILURE); 718 } 719 720 control_expectln("CLISENT"); 721 722 close(fd); 723 } 724 725 #define BUF_PATTERN_1 'a' 726 #define BUF_PATTERN_2 'b' 727 728 static void test_seqpacket_invalid_rec_buffer_client(const struct test_opts *opts) 729 { 730 int fd; 731 unsigned char *buf1; 732 unsigned char *buf2; 733 int buf_size = getpagesize() * 3; 734 735 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 736 if (fd < 0) { 737 perror("connect"); 738 exit(EXIT_FAILURE); 739 } 740 741 buf1 = malloc(buf_size); 742 if (!buf1) { 743 perror("'malloc()' for 'buf1'"); 744 exit(EXIT_FAILURE); 745 } 746 747 buf2 = malloc(buf_size); 748 if (!buf2) { 749 perror("'malloc()' for 'buf2'"); 750 exit(EXIT_FAILURE); 751 } 752 753 memset(buf1, BUF_PATTERN_1, buf_size); 754 memset(buf2, BUF_PATTERN_2, buf_size); 755 756 send_buf(fd, buf1, buf_size, 0, buf_size); 757 758 send_buf(fd, buf2, buf_size, 0, buf_size); 759 760 close(fd); 761 } 762 763 static void 
test_seqpacket_invalid_rec_buffer_server(const struct test_opts *opts) 764 { 765 int fd; 766 unsigned char *broken_buf; 767 unsigned char *valid_buf; 768 int page_size = getpagesize(); 769 int buf_size = page_size * 3; 770 ssize_t res; 771 int prot = PROT_READ | PROT_WRITE; 772 int flags = MAP_PRIVATE | MAP_ANONYMOUS; 773 int i; 774 775 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 776 if (fd < 0) { 777 perror("accept"); 778 exit(EXIT_FAILURE); 779 } 780 781 /* Setup first buffer. */ 782 broken_buf = mmap(NULL, buf_size, prot, flags, -1, 0); 783 if (broken_buf == MAP_FAILED) { 784 perror("mmap for 'broken_buf'"); 785 exit(EXIT_FAILURE); 786 } 787 788 /* Unmap "hole" in buffer. */ 789 if (munmap(broken_buf + page_size, page_size)) { 790 perror("'broken_buf' setup"); 791 exit(EXIT_FAILURE); 792 } 793 794 valid_buf = mmap(NULL, buf_size, prot, flags, -1, 0); 795 if (valid_buf == MAP_FAILED) { 796 perror("mmap for 'valid_buf'"); 797 exit(EXIT_FAILURE); 798 } 799 800 /* Try to fill buffer with unmapped middle. */ 801 res = read(fd, broken_buf, buf_size); 802 if (res != -1) { 803 fprintf(stderr, 804 "expected 'broken_buf' read(2) failure, got %zi\n", 805 res); 806 exit(EXIT_FAILURE); 807 } 808 809 if (errno != EFAULT) { 810 perror("unexpected errno of 'broken_buf'"); 811 exit(EXIT_FAILURE); 812 } 813 814 /* Try to fill valid buffer. */ 815 res = read(fd, valid_buf, buf_size); 816 if (res < 0) { 817 perror("unexpected 'valid_buf' read(2) failure"); 818 exit(EXIT_FAILURE); 819 } 820 821 if (res != buf_size) { 822 fprintf(stderr, 823 "invalid 'valid_buf' read(2), expected %i, got %zi\n", 824 buf_size, res); 825 exit(EXIT_FAILURE); 826 } 827 828 for (i = 0; i < buf_size; i++) { 829 if (valid_buf[i] != BUF_PATTERN_2) { 830 fprintf(stderr, 831 "invalid pattern for 'valid_buf' at %i, expected %hhX, got %hhX\n", 832 i, BUF_PATTERN_2, valid_buf[i]); 833 exit(EXIT_FAILURE); 834 } 835 } 836 837 /* Unmap buffers. 
*/ 838 munmap(broken_buf, page_size); 839 munmap(broken_buf + page_size * 2, page_size); 840 munmap(valid_buf, buf_size); 841 close(fd); 842 } 843 844 #define RCVLOWAT_BUF_SIZE 128 845 846 static void test_stream_poll_rcvlowat_server(const struct test_opts *opts) 847 { 848 int fd; 849 int i; 850 851 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 852 if (fd < 0) { 853 perror("accept"); 854 exit(EXIT_FAILURE); 855 } 856 857 /* Send 1 byte. */ 858 send_byte(fd, 1, 0); 859 860 control_writeln("SRVSENT"); 861 862 /* Wait until client is ready to receive rest of data. */ 863 control_expectln("CLNSENT"); 864 865 for (i = 0; i < RCVLOWAT_BUF_SIZE - 1; i++) 866 send_byte(fd, 1, 0); 867 868 /* Keep socket in active state. */ 869 control_expectln("POLLDONE"); 870 871 close(fd); 872 } 873 874 static void test_stream_poll_rcvlowat_client(const struct test_opts *opts) 875 { 876 int lowat_val = RCVLOWAT_BUF_SIZE; 877 char buf[RCVLOWAT_BUF_SIZE]; 878 struct pollfd fds; 879 short poll_flags; 880 int fd; 881 882 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 883 if (fd < 0) { 884 perror("connect"); 885 exit(EXIT_FAILURE); 886 } 887 888 setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT, 889 lowat_val, "setsockopt(SO_RCVLOWAT)"); 890 891 control_expectln("SRVSENT"); 892 893 /* At this point, server sent 1 byte. */ 894 fds.fd = fd; 895 poll_flags = POLLIN | POLLRDNORM; 896 fds.events = poll_flags; 897 898 /* Try to wait for 1 sec. */ 899 if (poll(&fds, 1, 1000) < 0) { 900 perror("poll"); 901 exit(EXIT_FAILURE); 902 } 903 904 /* poll() must return nothing. */ 905 if (fds.revents) { 906 fprintf(stderr, "Unexpected poll result %hx\n", 907 fds.revents); 908 exit(EXIT_FAILURE); 909 } 910 911 /* Tell server to send rest of data. */ 912 control_writeln("CLNSENT"); 913 914 /* Poll for data. */ 915 if (poll(&fds, 1, 10000) < 0) { 916 perror("poll"); 917 exit(EXIT_FAILURE); 918 } 919 920 /* Only these two bits are expected. 
*/ 921 if (fds.revents != poll_flags) { 922 fprintf(stderr, "Unexpected poll result %hx\n", 923 fds.revents); 924 exit(EXIT_FAILURE); 925 } 926 927 /* Use MSG_DONTWAIT, if call is going to wait, EAGAIN 928 * will be returned. 929 */ 930 recv_buf(fd, buf, sizeof(buf), MSG_DONTWAIT, RCVLOWAT_BUF_SIZE); 931 932 control_writeln("POLLDONE"); 933 934 close(fd); 935 } 936 937 #define INV_BUF_TEST_DATA_LEN 512 938 939 static void test_inv_buf_client(const struct test_opts *opts, bool stream) 940 { 941 unsigned char data[INV_BUF_TEST_DATA_LEN] = {0}; 942 ssize_t expected_ret; 943 int fd; 944 945 if (stream) 946 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 947 else 948 fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port); 949 950 if (fd < 0) { 951 perror("connect"); 952 exit(EXIT_FAILURE); 953 } 954 955 control_expectln("SENDDONE"); 956 957 /* Use invalid buffer here. */ 958 recv_buf(fd, NULL, sizeof(data), 0, -EFAULT); 959 960 if (stream) { 961 /* For SOCK_STREAM we must continue reading. */ 962 expected_ret = sizeof(data); 963 } else { 964 /* For SOCK_SEQPACKET socket's queue must be empty. 
*/ 965 expected_ret = -EAGAIN; 966 } 967 968 recv_buf(fd, data, sizeof(data), MSG_DONTWAIT, expected_ret); 969 970 control_writeln("DONE"); 971 972 close(fd); 973 } 974 975 static void test_inv_buf_server(const struct test_opts *opts, bool stream) 976 { 977 unsigned char data[INV_BUF_TEST_DATA_LEN] = {0}; 978 int fd; 979 980 if (stream) 981 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 982 else 983 fd = vsock_seqpacket_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 984 985 if (fd < 0) { 986 perror("accept"); 987 exit(EXIT_FAILURE); 988 } 989 990 send_buf(fd, data, sizeof(data), 0, sizeof(data)); 991 992 control_writeln("SENDDONE"); 993 994 control_expectln("DONE"); 995 996 close(fd); 997 } 998 999 static void test_stream_inv_buf_client(const struct test_opts *opts) 1000 { 1001 test_inv_buf_client(opts, true); 1002 } 1003 1004 static void test_stream_inv_buf_server(const struct test_opts *opts) 1005 { 1006 test_inv_buf_server(opts, true); 1007 } 1008 1009 static void test_seqpacket_inv_buf_client(const struct test_opts *opts) 1010 { 1011 test_inv_buf_client(opts, false); 1012 } 1013 1014 static void test_seqpacket_inv_buf_server(const struct test_opts *opts) 1015 { 1016 test_inv_buf_server(opts, false); 1017 } 1018 1019 #define HELLO_STR "HELLO" 1020 #define WORLD_STR "WORLD" 1021 1022 static void test_stream_virtio_skb_merge_client(const struct test_opts *opts) 1023 { 1024 int fd; 1025 1026 fd = vsock_stream_connect(opts->peer_cid, opts->peer_port); 1027 if (fd < 0) { 1028 perror("connect"); 1029 exit(EXIT_FAILURE); 1030 } 1031 1032 /* Send first skbuff. */ 1033 send_buf(fd, HELLO_STR, strlen(HELLO_STR), 0, strlen(HELLO_STR)); 1034 1035 control_writeln("SEND0"); 1036 /* Peer reads part of first skbuff. */ 1037 control_expectln("REPLY0"); 1038 1039 /* Send second skbuff, it will be appended to the first. 
*/ 1040 send_buf(fd, WORLD_STR, strlen(WORLD_STR), 0, strlen(WORLD_STR)); 1041 1042 control_writeln("SEND1"); 1043 /* Peer reads merged skbuff packet. */ 1044 control_expectln("REPLY1"); 1045 1046 close(fd); 1047 } 1048 1049 static void test_stream_virtio_skb_merge_server(const struct test_opts *opts) 1050 { 1051 size_t read = 0, to_read; 1052 unsigned char buf[64]; 1053 int fd; 1054 1055 fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL); 1056 if (fd < 0) { 1057 perror("accept"); 1058 exit(EXIT_FAILURE); 1059 } 1060 1061 control_expectln("SEND0"); 1062 1063 /* Read skbuff partially. */ 1064 to_read = 2; 1065 recv_buf(fd, buf + read, to_read, 0, to_read); 1066 read += to_read; 1067 1068 control_writeln("REPLY0"); 1069 control_expectln("SEND1"); 1070 1071 /* Read the rest of both buffers */ 1072 to_read = strlen(HELLO_STR WORLD_STR) - read; 1073 recv_buf(fd, buf + read, to_read, 0, to_read); 1074 read += to_read; 1075 1076 /* No more bytes should be there */ 1077 to_read = sizeof(buf) - read; 1078 recv_buf(fd, buf + read, to_read, MSG_DONTWAIT, -EAGAIN); 1079 1080 if (memcmp(buf, HELLO_STR WORLD_STR, strlen(HELLO_STR WORLD_STR))) { 1081 fprintf(stderr, "pattern mismatch\n"); 1082 exit(EXIT_FAILURE); 1083 } 1084 1085 control_writeln("REPLY1"); 1086 1087 close(fd); 1088 } 1089 1090 static void test_seqpacket_msg_peek_client(const struct test_opts *opts) 1091 { 1092 return test_msg_peek_client(opts, true); 1093 } 1094 1095 static void test_seqpacket_msg_peek_server(const struct test_opts *opts) 1096 { 1097 return test_msg_peek_server(opts, true); 1098 } 1099 1100 static sig_atomic_t have_sigpipe; 1101 1102 static void sigpipe(int signo) 1103 { 1104 have_sigpipe = 1; 1105 } 1106 1107 #define SEND_SLEEP_USEC (10 * 1000) 1108 1109 static void test_stream_check_sigpipe(int fd) 1110 { 1111 ssize_t res; 1112 1113 have_sigpipe = 0; 1114 1115 /* When the other peer calls shutdown(SHUT_RD), there is a chance that 1116 * the send() call could occur before the 
message carrying the close
	 * information arrives over the transport. In such cases, the send()
	 * might still succeed. To avoid this race, let's retry the send() call
	 * a few times, ensuring the test is more reliable.
	 */
	timeout_begin(TIMEOUT);
	while(1) {
		res = send(fd, "A", 1, 0);
		/* Loop only exits on a real failure (not EINTR). */
		if (res == -1 && errno != EINTR)
			break;

		/* Sleep a little before trying again to avoid flooding the
		 * other peer and filling its receive buffer, causing
		 * false-negative.
		 */
		timeout_usleep(SEND_SLEEP_USEC);
		timeout_check("send");
	}
	timeout_end();

	if (errno != EPIPE) {
		fprintf(stderr, "unexpected send(2) errno %d\n", errno);
		exit(EXIT_FAILURE);
	}
	if (!have_sigpipe) {
		fprintf(stderr, "SIGPIPE expected\n");
		exit(EXIT_FAILURE);
	}

	have_sigpipe = 0;

	/* Same check again, but MSG_NOSIGNAL must suppress SIGPIPE. */
	timeout_begin(TIMEOUT);
	while(1) {
		res = send(fd, "A", 1, MSG_NOSIGNAL);
		if (res == -1 && errno != EINTR)
			break;

		timeout_usleep(SEND_SLEEP_USEC);
		timeout_check("send");
	}
	timeout_end();

	if (errno != EPIPE) {
		fprintf(stderr, "unexpected send(2) errno %d\n", errno);
		exit(EXIT_FAILURE);
	}
	if (have_sigpipe) {
		fprintf(stderr, "SIGPIPE not expected\n");
		exit(EXIT_FAILURE);
	}
}

/* Client shuts down its own write side, then checks EPIPE/SIGPIPE
 * behavior on further sends.
 */
static void test_stream_shutwr_client(const struct test_opts *opts)
{
	int fd;

	struct sigaction act = {
		.sa_handler = sigpipe,
	};

	sigaction(SIGPIPE, &act, NULL);

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	if (shutdown(fd, SHUT_WR)) {
		perror("shutdown");
		exit(EXIT_FAILURE);
	}

	test_stream_check_sigpipe(fd);

	control_writeln("CLIENTDONE");

	close(fd);
}

static void test_stream_shutwr_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	control_expectln("CLIENTDONE");

	close(fd);
}

/* Client checks EPIPE/SIGPIPE behavior after the *server* shuts down
 * its read side.
 */
static void test_stream_shutrd_client(const struct test_opts *opts)
{
	int fd;

	struct sigaction act = {
		.sa_handler = sigpipe,
	};

	sigaction(SIGPIPE, &act, NULL);

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	control_expectln("SHUTRDDONE");

	test_stream_check_sigpipe(fd);

	control_writeln("CLIENTDONE");

	close(fd);
}

static void test_stream_shutrd_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	if (shutdown(fd, SHUT_RD)) {
		perror("shutdown");
		exit(EXIT_FAILURE);
	}

	control_writeln("SHUTRDDONE");
	control_expectln("CLIENTDONE");

	close(fd);
}

/* Accept two successive connections from a client that binds to the
 * same local address both times.
 */
static void test_double_bind_connect_server(const struct test_opts *opts)
{
	int listen_fd, client_fd, i;
	struct sockaddr_vm sa_client;
	socklen_t socklen_client = sizeof(sa_client);

	listen_fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);

	for (i = 0; i < 2; i++) {
		control_writeln("LISTENING");

		timeout_begin(TIMEOUT);
		do {
			client_fd = accept(listen_fd, (struct sockaddr *)&sa_client,
					   &socklen_client);
			timeout_check("accept");
		} while (client_fd < 0 && errno == EINTR);
		timeout_end();

		if (client_fd < 0) {
			perror("accept");
			exit(EXIT_FAILURE);
		}

		/* Waiting for remote peer to close connection */
		vsock_wait_remote_close(client_fd);
	}

	close(listen_fd);
}

/* Bind to the same local port and connect twice in a row; the second
 * bind must succeed after the first socket is closed.
 */
static void test_double_bind_connect_client(const struct test_opts *opts)
{
	int i, client_fd;

	for (i = 0; i < 2; i++) {
		/* Wait until server is ready to accept a new connection */
		control_expectln("LISTENING");

		/* We use 'peer_port + 1' as "some" port for the 'bind()'
		 * call. It is safe for overflow, but must be considered,
		 * when running multiple test applications simultaneously
		 * where 'peer-port' argument differs by 1.
		 */
		client_fd = vsock_bind_connect(opts->peer_cid, opts->peer_port,
					       opts->peer_port + 1, SOCK_STREAM);

		close(client_fd);
	}
}

#define MSG_BUF_IOCTL_LEN 64
static void test_unsent_bytes_server(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int client_fd;

	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
	if (client_fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	recv_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
	control_writeln("RECEIVED");

	close(client_fd);
}

/* Send a buffer and, once the peer confirms receipt, wait for SIOCOUTQ
 * to report zero unsent bytes.
 */
static void test_unsent_bytes_client(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int fd;

	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	for (int i = 0; i < sizeof(buf); i++)
		buf[i] = rand() & 0xFF;

	send_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
	control_expectln("RECEIVED");

	/* SIOCOUTQ isn't guaranteed to instantly track sent data. Even though
	 * the "RECEIVED" message means that the other side has received the
	 * data, there can be a delay in our kernel before updating the "unsent
	 * bytes" counter. vsock_wait_sent() will repeat SIOCOUTQ until it
	 * returns 0.
	 */
	if (!vsock_wait_sent(fd))
		fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");

	close(fd);
}

static void test_unread_bytes_server(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int client_fd;

	client_fd = vsock_accept(VMADDR_CID_ANY, opts->peer_port, NULL, type);
	if (client_fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	for (int i = 0; i < sizeof(buf); i++)
		buf[i] = rand() & 0xFF;

	send_buf(client_fd, buf, sizeof(buf), 0, sizeof(buf));
	control_writeln("SENT");

	close(client_fd);
}

/* Check SIOCINQ: it must report the full queued length before reading
 * and zero after the data is consumed.
 */
static void test_unread_bytes_client(const struct test_opts *opts, int type)
{
	unsigned char buf[MSG_BUF_IOCTL_LEN];
	int fd;

	fd = vsock_connect(opts->peer_cid, opts->peer_port, type);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	control_expectln("SENT");
	/* The data has arrived but has not been read. The expected is
	 * MSG_BUF_IOCTL_LEN.
	 */
	if (!vsock_ioctl_int(fd, SIOCINQ, MSG_BUF_IOCTL_LEN)) {
		fprintf(stderr, "Test skipped, SIOCINQ not supported.\n");
		goto out;
	}

	recv_buf(fd, buf, sizeof(buf), 0, sizeof(buf));
	/* All data has been consumed, so the expected is 0. */
	vsock_ioctl_int(fd, SIOCINQ, 0);

out:
	close(fd);
}

static void test_stream_unsent_bytes_client(const struct test_opts *opts)
{
	test_unsent_bytes_client(opts, SOCK_STREAM);
}

static void test_stream_unsent_bytes_server(const struct test_opts *opts)
{
	test_unsent_bytes_server(opts, SOCK_STREAM);
}

static void test_seqpacket_unsent_bytes_client(const struct test_opts *opts)
{
	test_unsent_bytes_client(opts, SOCK_SEQPACKET);
}

static void test_seqpacket_unsent_bytes_server(const struct test_opts *opts)
{
	test_unsent_bytes_server(opts, SOCK_SEQPACKET);
}

static void test_stream_unread_bytes_client(const struct test_opts *opts)
{
	test_unread_bytes_client(opts, SOCK_STREAM);
}

static void test_stream_unread_bytes_server(const struct test_opts *opts)
{
	test_unread_bytes_server(opts, SOCK_STREAM);
}

static void test_seqpacket_unread_bytes_client(const struct test_opts *opts)
{
	test_unread_bytes_client(opts, SOCK_SEQPACKET);
}

static void test_seqpacket_unread_bytes_server(const struct test_opts *opts)
{
	test_unread_bytes_server(opts, SOCK_SEQPACKET);
}

#define RCVLOWAT_CREDIT_UPD_BUF_SIZE (1024 * 128)
/* This define is the same as in 'include/linux/virtio_vsock.h':
 * it is used to decide when to send credit update message during
 * reading from rx queue of a socket. Value and its usage in
 * kernel is important for this test.
 */
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)

/* Sender side for both credit-update tests: push one byte more than the
 * peer's receive buffer so transmission can only complete after the peer
 * sends a credit update.
 */
static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opts)
{
	size_t buf_size;
	void *buf;
	int fd;

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	/* Send 1 byte more than peer's buffer size. */
	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE + 1;

	buf = malloc(buf_size);
	if (!buf) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	/* Wait until peer sets needed buffer size. */
	recv_byte(fd, 1, 0);

	if (send(fd, buf, buf_size, 0) != buf_size) {
		perror("send failed");
		exit(EXIT_FAILURE);
	}

	free(buf);
	close(fd);
}

/* Receiver side for both credit-update tests. With 'low_rx_bytes_test' the
 * credit update is expected when the rx queue drops below SO_RCVLOWAT;
 * otherwise it is triggered explicitly by updating SO_RCVLOWAT mid-stream.
 */
static void test_stream_credit_update_test(const struct test_opts *opts,
					   bool low_rx_bytes_test)
{
	int recv_buf_size;
	struct pollfd fds;
	size_t buf_size;
	unsigned long long sock_buf_size;
	void *buf;
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE;

	/* size_t can be < unsigned long long */
	sock_buf_size = buf_size;

	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");

	if (low_rx_bytes_test) {
		/* Set new SO_RCVLOWAT here. This enables sending credit
		 * update when number of bytes in our rx queue becomes <
		 * SO_RCVLOWAT value.
		 */
		recv_buf_size = 1 + VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;

		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
	}

	/* Send one dummy byte here, because 'setsockopt()' above also
	 * sends special packet which tells sender to update our buffer
	 * size. This 'send_byte()' will serialize such packet with data
	 * reads in a loop below. Sender starts transmission only when
	 * it receives this single byte.
	 */
	send_byte(fd, 1, 0);

	buf = malloc(buf_size);
	if (!buf) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}

	/* Wait until there will be 128KB of data in rx queue. */
	recv_buf(fd, buf, buf_size, MSG_PEEK, buf_size);

	/* There is 128KB of data in the socket's rx queue, dequeue first
	 * 64KB, credit update is sent if 'low_rx_bytes_test' == true.
	 * Otherwise, credit update is sent in 'if (!low_rx_bytes_test)'.
	 */
	recv_buf_size = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
	recv_buf(fd, buf, recv_buf_size, 0, recv_buf_size);

	if (!low_rx_bytes_test) {
		recv_buf_size++;

		/* Updating SO_RCVLOWAT will send credit update. */
		setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT,
				     recv_buf_size, "setsockopt(SO_RCVLOWAT)");
	}

	fds.fd = fd;
	fds.events = POLLIN | POLLRDNORM | POLLERR |
		     POLLRDHUP | POLLHUP;

	/* This 'poll()' will return once we receive last byte
	 * sent by client.
	 */
	if (poll(&fds, 1, -1) < 0) {
		perror("poll");
		exit(EXIT_FAILURE);
	}

	if (fds.revents & POLLERR) {
		fprintf(stderr, "'poll()' error\n");
		exit(EXIT_FAILURE);
	}

	if (fds.revents & (POLLIN | POLLRDNORM)) {
		recv_buf(fd, buf, recv_buf_size, MSG_DONTWAIT, recv_buf_size);
	} else {
		/* These flags must be set, as there is at
		 * least 64KB of data ready to read.
		 */
		fprintf(stderr, "POLLIN | POLLRDNORM expected\n");
		exit(EXIT_FAILURE);
	}

	free(buf);
	close(fd);
}

static void test_stream_cred_upd_on_low_rx_bytes(const struct test_opts *opts)
{
	test_stream_credit_update_test(opts, true);
}

static void test_stream_cred_upd_on_set_rcvlowat(const struct test_opts *opts)
{
	test_stream_credit_update_test(opts, false);
}

/* The goal of test leak_acceptq is to stress the race between connect() and
 * close(listener). Implementation of client/server loops boils down to:
 *
 * client                server
 * ------                ------
 * write(CONTINUE)
 *                       expect(CONTINUE)
 *                       listen()
 *                       write(LISTENING)
 * expect(LISTENING)
 * connect()             close()
 */
#define ACCEPTQ_LEAK_RACE_TIMEOUT 2 /* seconds */

static void test_stream_leak_acceptq_client(const struct test_opts *opts)
{
	time_t tout;
	int fd;

	tout = current_nsec() + ACCEPTQ_LEAK_RACE_TIMEOUT * NSEC_PER_SEC;
	do {
		control_writeulong(CONTROL_CONTINUE);

		fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
		if (fd >= 0)
			close(fd);
	} while (current_nsec() < tout);

	control_writeulong(CONTROL_DONE);
}

/* Test for a memory leak. User is expected to run kmemleak scan, see README. */
static void test_stream_leak_acceptq_server(const struct test_opts *opts)
{
	int fd;

	while (control_readulong() == CONTROL_CONTINUE) {
		fd = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);
		control_writeln("LISTENING");
		close(fd);
	}
}

/* Test for a memory leak. User is expected to run kmemleak scan, see README.
 */
static void test_stream_msgzcopy_leak_errq_client(const struct test_opts *opts)
{
	struct pollfd fds = { 0 };
	int fd;

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	enable_so_zerocopy_check(fd);
	send_byte(fd, 1, MSG_ZEROCOPY);

	/* Block until the zerocopy completion lands on the error queue;
	 * it is deliberately never read (that is the leak under test).
	 */
	fds.fd = fd;
	fds.events = 0;
	if (poll(&fds, 1, -1) < 0) {
		perror("poll");
		exit(EXIT_FAILURE);
	}

	close(fd);
}

static void test_stream_msgzcopy_leak_errq_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	recv_byte(fd, 1, 0);
	vsock_wait_remote_close(fd);
	close(fd);
}

/* Test msgzcopy_leak_zcskb is meant to exercise sendmsg() error handling path,
 * that might leak an skb. The idea is to fail virtio_transport_init_zcopy_skb()
 * by hitting net.core.optmem_max limit in sock_omalloc(), specifically
 *
 *   vsock_connectible_sendmsg
 *     virtio_transport_stream_enqueue
 *       virtio_transport_send_pkt_info
 *         virtio_transport_init_zcopy_skb
 *         . msg_zerocopy_realloc
 *         .   msg_zerocopy_alloc
 *         .     sock_omalloc
 *         .       sk_omem_alloc + size > sysctl_optmem_max
 *         return -ENOMEM
 *
 * We abuse the implementation detail of net/socket.c:____sys_sendmsg().
 * sk_omem_alloc can be precisely bumped by sock_kmalloc(), as it is used to
 * fetch user-provided control data.
 *
 * While this approach works for now, it relies on assumptions regarding the
 * implementation and configuration (for example, order of net.core.optmem_max
 * can not exceed MAX_PAGE_ORDER), which may not hold in the future. A more
 * resilient testing could be implemented by leveraging the Fault injection
 * framework (CONFIG_FAULT_INJECTION), e.g.
 *
 * client# echo N > /sys/kernel/debug/failslab/ignore-gfp-wait
 * client# echo 0 > /sys/kernel/debug/failslab/verbose
 *
 * void client(const struct test_opts *opts)
 * {
 *	char buf[16];
 *	int f, s, i;
 *
 *	f = open("/proc/self/fail-nth", O_WRONLY);
 *
 *	for (i = 1; i < 32; i++) {
 *		control_writeulong(CONTROL_CONTINUE);
 *
 *		s = vsock_stream_connect(opts->peer_cid, opts->peer_port);
 *		enable_so_zerocopy_check(s);
 *
 *		sprintf(buf, "%d", i);
 *		write(f, buf, strlen(buf));
 *
 *		send(s, &(char){ 0 }, 1, MSG_ZEROCOPY);
 *
 *		write(f, "0", 1);
 *		close(s);
 *	}
 *
 *	control_writeulong(CONTROL_DONE);
 *	close(f);
 * }
 *
 * void server(const struct test_opts *opts)
 * {
 *	int fd;
 *
 *	while (control_readulong() == CONTROL_CONTINUE) {
 *		fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
 *		vsock_wait_remote_close(fd);
 *		close(fd);
 *	}
 * }
 *
 * Refer to Documentation/fault-injection/fault-injection.rst.
 */
#define MAX_PAGE_ORDER	10	/* usually */
#define PAGE_SIZE	4096

/* Test for a memory leak. User is expected to run kmemleak scan, see README.
 */
static void test_stream_msgzcopy_leak_zcskb_client(const struct test_opts *opts)
{
	size_t optmem_max, ctl_len, chunk_size;
	struct msghdr msg = { 0 };
	struct iovec iov;
	char *chunk;
	int fd, res;
	FILE *f;

	f = fopen("/proc/sys/net/core/optmem_max", "r");
	if (!f) {
		perror("fopen(optmem_max)");
		exit(EXIT_FAILURE);
	}

	if (fscanf(f, "%zu", &optmem_max) != 1) {
		fprintf(stderr, "fscanf(optmem_max) failed\n");
		exit(EXIT_FAILURE);
	}

	fclose(f);

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	enable_so_zerocopy_check(fd);

	/* Almost fill sk_omem_alloc with control data, so the zerocopy skb
	 * allocation in the kernel is pushed over sysctl_optmem_max.
	 */
	ctl_len = optmem_max - 1;
	if (ctl_len > PAGE_SIZE << MAX_PAGE_ORDER) {
		fprintf(stderr, "Try with net.core.optmem_max = 100000\n");
		exit(EXIT_FAILURE);
	}

	chunk_size = CMSG_SPACE(ctl_len);
	chunk = malloc(chunk_size);
	if (!chunk) {
		perror("malloc");
		exit(EXIT_FAILURE);
	}
	memset(chunk, 0, chunk_size);

	iov.iov_base = &(char){ 0 };
	iov.iov_len = 1;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = chunk;
	msg.msg_controllen = ctl_len;

	errno = 0;
	res = sendmsg(fd, &msg, MSG_ZEROCOPY);
	if (res >= 0 || errno != ENOMEM) {
		fprintf(stderr, "Expected ENOMEM, got errno=%d res=%d\n",
			errno, res);
		exit(EXIT_FAILURE);
	}

	close(fd);
}

static void test_stream_msgzcopy_leak_zcskb_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	vsock_wait_remote_close(fd);
	close(fd);
}

#define MAX_PORT_RETRIES	24	/* net/vmw_vsock/af_vsock.c */

/* Return false if the transport for 'cid' is unavailable or untestable in
 * this setup, true once the reassignment sequence has been exercised.
 */
static bool test_stream_transport_uaf(int cid)
{
	int sockets[MAX_PORT_RETRIES];
	struct sockaddr_vm addr;
	socklen_t alen;
	int fd, i, c;
	bool ret;

	/* Probe for a transport by attempting a local CID bind. Unavailable
	 * transport (or more specifically: an unsupported transport/CID
	 * combination) results in EADDRNOTAVAIL, other errnos are fatal.
	 */
	fd = vsock_bind_try(cid, VMADDR_PORT_ANY, SOCK_STREAM);
	if (fd < 0) {
		if (errno != EADDRNOTAVAIL) {
			perror("Unexpected bind() errno");
			exit(EXIT_FAILURE);
		}

		return false;
	}

	alen = sizeof(addr);
	if (getsockname(fd, (struct sockaddr *)&addr, &alen)) {
		perror("getsockname");
		exit(EXIT_FAILURE);
	}

	/* Drain the autobind pool; see __vsock_bind_connectible(). */
	for (i = 0; i < MAX_PORT_RETRIES; ++i)
		sockets[i] = vsock_bind(cid, ++addr.svm_port, SOCK_STREAM);

	close(fd);

	/* Setting SOCK_NONBLOCK makes connect() return soon after
	 * (re-)assigning the transport. We are not connecting to anything
	 * anyway, so there is no point entering the main loop in
	 * vsock_connect(); waiting for timeout, checking for signals, etc.
	 */
	fd = socket(AF_VSOCK, SOCK_STREAM | SOCK_NONBLOCK, 0);
	if (fd < 0) {
		perror("socket");
		exit(EXIT_FAILURE);
	}

	/* Assign transport, while failing to autobind. Autobind pool was
	 * drained, so EADDRNOTAVAIL coming from __vsock_bind_connectible() is
	 * expected.
	 *
	 * One exception is ENODEV which is thrown by vsock_assign_transport(),
	 * i.e. before vsock_auto_bind(), when the only transport loaded is
	 * vhost.
	 */
	if (!connect(fd, (struct sockaddr *)&addr, alen)) {
		fprintf(stderr, "Unexpected connect() success\n");
		exit(EXIT_FAILURE);
	}
	if (errno == ENODEV && cid == VMADDR_CID_HOST) {
		ret = false;
		goto cleanup;
	}
	if (errno != EADDRNOTAVAIL) {
		perror("Unexpected connect() errno");
		exit(EXIT_FAILURE);
	}

	/* Reassign transport, triggering old transport release and
	 * (potentially) unbinding of an unbound socket.
	 *
	 * Vulnerable system may crash now.
	 */
	for (c = VMADDR_CID_HYPERVISOR; c <= VMADDR_CID_HOST + 1; ++c) {
		if (c != cid) {
			addr.svm_cid = c;
			(void)connect(fd, (struct sockaddr *)&addr, alen);
		}
	}

	ret = true;
cleanup:
	close(fd);
	while (i--)
		close(sockets[i]);

	return ret;
}

/* Test attempts to trigger a transport release for an unbound socket. This can
 * lead to a reference count mishandling.
 */
static void test_stream_transport_uaf_client(const struct test_opts *opts)
{
	bool tested = false;
	int cid, tr;

	for (cid = VMADDR_CID_HYPERVISOR; cid <= VMADDR_CID_HOST + 1; ++cid)
		tested |= test_stream_transport_uaf(cid);

	tr = get_transports();
	if (!tr)
		fprintf(stderr, "No transports detected\n");
	else if (tr == TRANSPORT_VIRTIO)
		fprintf(stderr, "Setup unsupported: sole virtio transport\n");
	else if (!tested)
		fprintf(stderr, "No transports tested\n");
}

/* First connect() must fail (no listener yet); the same fd is then reused
 * for a second, successful connect() once the server starts listening.
 */
static void test_stream_connect_retry_client(const struct test_opts *opts)
{
	int fd;

	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket");
		exit(EXIT_FAILURE);
	}

	if (!vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) {
		fprintf(stderr, "Unexpected connect() #1 success\n");
		exit(EXIT_FAILURE);
	}

	control_writeln("LISTEN");
	control_expectln("LISTENING");

	if (vsock_connect_fd(fd, opts->peer_cid, opts->peer_port)) {
		perror("connect() #2");
		exit(EXIT_FAILURE);
	}

	close(fd);
}

static void test_stream_connect_retry_server(const struct test_opts *opts)
{
	int fd;

	control_expectln("LISTEN");

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	vsock_wait_remote_close(fd);
	close(fd);
}

#define TRANSPORT_CHANGE_TIMEOUT 2 /* seconds */

/* Pummel the main process with SIGUSR1 so its in-flight connect() calls keep
 * getting interrupted; cancelled asynchronously by the main thread.
 */
static void *test_stream_transport_change_thread(void *vargp)
{
	pid_t *pid = (pid_t *)vargp;
	int ret;

	ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	if (ret) {
		fprintf(stderr, "pthread_setcanceltype: %d\n", ret);
		exit(EXIT_FAILURE);
	}

	while (true) {
		if (kill(*pid, SIGUSR1) < 0) {
			perror("kill");
			exit(EXIT_FAILURE);
		}
	}
	return NULL;
}

static void test_transport_change_signal_handler(int signal)
{
	/* We need a custom handler for SIGUSR1 as the default one terminates the process. */
}

static void test_stream_transport_change_client(const struct test_opts *opts)
{
	__sighandler_t old_handler;
	pid_t pid = getpid();
	pthread_t thread_id;
	time_t tout;
	int ret, tr;

	tr = get_transports();

	/* Print a warning if there is a G2H transport loaded.
	 * This is on a best effort basis because VMCI can be either G2H or H2G, and there is
	 * no easy way to understand it.
	 * The bug we are testing only appears when G2H transports are not loaded.
	 * This is because `vsock_assign_transport`, when using CID 0, assigns a G2H transport
	 * to vsk->transport. If none is available it is set to NULL, causing the null-ptr-deref.
	 */
	if (tr & TRANSPORTS_G2H)
		fprintf(stderr, "G2H Transport detected. This test will not fail.\n");

	old_handler = signal(SIGUSR1, test_transport_change_signal_handler);
	if (old_handler == SIG_ERR) {
		perror("signal");
		exit(EXIT_FAILURE);
	}

	ret = pthread_create(&thread_id, NULL, test_stream_transport_change_thread, &pid);
	if (ret) {
		fprintf(stderr, "pthread_create: %d\n", ret);
		exit(EXIT_FAILURE);
	}

	control_expectln("LISTENING");

	tout = current_nsec() + TRANSPORT_CHANGE_TIMEOUT * NSEC_PER_SEC;
	do {
		struct sockaddr_vm sa = {
			.svm_family = AF_VSOCK,
			.svm_cid = opts->peer_cid,
			.svm_port = opts->peer_port,
		};
		bool send_control = false;
		int s;

		s = socket(AF_VSOCK, SOCK_STREAM, 0);
		if (s < 0) {
			perror("socket");
			exit(EXIT_FAILURE);
		}

		/* Although setting SO_LINGER does not affect the original test
		 * for null-ptr-deref, it may trigger a lockdep warning.
		 */
		enable_so_linger(s, 1);

		ret = connect(s, (struct sockaddr *)&sa, sizeof(sa));
		/* The connect can fail due to signals coming from the thread,
		 * or because the receiver connection queue is full.
		 * Ignoring also the latter case because there is no way
		 * of synchronizing client's connect and server's accept when
		 * connect(s) are constantly being interrupted by signals.
		 */
		if (ret == -1 && (errno != EINTR && errno != ECONNRESET)) {
			perror("connect");
			exit(EXIT_FAILURE);
		}

		/* Notify the server if the connect() is successful or the
		 * receiver connection queue is full, so it will do accept()
		 * to drain it.
		 */
		if (!ret || errno == ECONNRESET)
			send_control = true;

		/* Setting CID to 0 causes a transport change. */
		sa.svm_cid = 0;

		/* There is a case where this will not fail:
		 * if the previous connect() is interrupted while the
		 * connection request is already sent, this second
		 * connect() will wait for the response.
		 */
		ret = connect(s, (struct sockaddr *)&sa, sizeof(sa));
		if (!ret || errno == ECONNRESET)
			send_control = true;

		close(s);

		if (send_control)
			control_writeulong(CONTROL_CONTINUE);

	} while (current_nsec() < tout);

	control_writeulong(CONTROL_DONE);

	ret = pthread_cancel(thread_id);
	if (ret) {
		fprintf(stderr, "pthread_cancel: %d\n", ret);
		exit(EXIT_FAILURE);
	}

	ret = pthread_join(thread_id, NULL);
	if (ret) {
		fprintf(stderr, "pthread_join: %d\n", ret);
		exit(EXIT_FAILURE);
	}

	if (signal(SIGUSR1, old_handler) == SIG_ERR) {
		perror("signal");
		exit(EXIT_FAILURE);
	}
}

static void test_stream_transport_change_server(const struct test_opts *opts)
{
	int s = vsock_stream_listen(VMADDR_CID_ANY, opts->peer_port);

	/* Set the socket to be nonblocking because connects that have been interrupted
	 * (EINTR) can fill the receiver's accept queue anyway, leading to connect failure.
	 * As of today (6.15) in such situation there is no way to understand, from the
	 * client side, if the connection has been queued in the server or not.
	 */
	if (fcntl(s, F_SETFL, fcntl(s, F_GETFL, 0) | O_NONBLOCK) < 0) {
		perror("fcntl");
		exit(EXIT_FAILURE);
	}
	control_writeln("LISTENING");

	while (control_readulong() == CONTROL_CONTINUE) {
		/* Must accept the connection, otherwise the `listen`
		 * queue will fill up and new connections will fail.
		 * There can be more than one queued connection,
		 * clear them all.
		 */
		while (true) {
			int client = accept(s, NULL, NULL);

			if (client < 0) {
				if (errno == EAGAIN)
					break;

				perror("accept");
				exit(EXIT_FAILURE);
			}

			close(client);
		}
	}

	close(s);
}

/* Regression test: close() with SO_LINGER enabled must not dereference a
 * NULL transport (see the matching test_cases[] entry).
 */
static void test_stream_linger_client(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	enable_so_linger(fd, 1);
	close(fd);
}

static void test_stream_linger_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	vsock_wait_remote_close(fd);
	close(fd);
}

/* Half of the default to not risk timing out the control channel */
#define LINGER_TIMEOUT	(TIMEOUT / 2)

/* Once all sent data was acked (SIOCOUTQ == 0), close() must not linger even
 * though the peer has not read the byte yet.
 */
static void test_stream_nolinger_client(const struct test_opts *opts)
{
	bool waited;
	time_t ns;
	int fd;

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	enable_so_linger(fd, LINGER_TIMEOUT);
	send_byte(fd, 1, 0); /* Left unread to expose incorrect behaviour.
 */
	waited = vsock_wait_sent(fd);

	/* Measure how long close() takes; with an empty tx queue it must
	 * return well before the SO_LINGER timeout.
	 */
	ns = current_nsec();
	close(fd);
	ns = current_nsec() - ns;

	if (!waited) {
		fprintf(stderr, "Test skipped, SIOCOUTQ not supported.\n");
	} else if (DIV_ROUND_UP(ns, NSEC_PER_SEC) >= LINGER_TIMEOUT) {
		fprintf(stderr, "Unexpected lingering\n");
		exit(EXIT_FAILURE);
	}

	control_writeln("DONE");
}

static void test_stream_nolinger_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	control_expectln("DONE");
	close(fd);
}

static void test_stream_accepted_setsockopt_client(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	close(fd);
}

/* Apply a custom setsockopt() (SO_ZEROCOPY) on an accept()ed socket, not
 * just on connect()ed ones.
 */
static void test_stream_accepted_setsockopt_server(const struct test_opts *opts)
{
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	enable_so_zerocopy_check(fd);
	close(fd);
}

/* With a small local buffer size set, a non-blocking sender must hit
 * EAGAIN before queueing more than that many bytes.
 */
static void test_stream_tx_credit_bounds_client(const struct test_opts *opts)
{
	unsigned long long sock_buf_size;
	size_t total = 0;
	char buf[4096];
	int fd;

	memset(buf, 'A', sizeof(buf));

	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
	if (fd < 0) {
		perror("connect");
		exit(EXIT_FAILURE);
	}

	sock_buf_size = SOCK_BUF_SIZE_SMALL;

	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");

	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");

	if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK) < 0) {
		perror("fcntl(F_SETFL)");
		exit(EXIT_FAILURE);
	}

	control_expectln("SRVREADY");

	for (;;) {
		ssize_t sent = send(fd, buf, sizeof(buf), 0);

		if (sent == 0) {
			fprintf(stderr, "unexpected EOF while sending bytes\n");
			exit(EXIT_FAILURE);
		}

		if (sent < 0) {
			if (errno == EINTR)
				continue;

			if (errno == EAGAIN || errno == EWOULDBLOCK)
				break;

			perror("send");
			exit(EXIT_FAILURE);
		}

		total += sent;
	}

	control_writeln("CLIDONE");
	close(fd);

	/* We should not be able to send more bytes than the value set as
	 * local buffer size.
	 */
	if (total > sock_buf_size) {
		fprintf(stderr,
			"TX credit too large: queued %zu bytes (expected <= %llu)\n",
			total, sock_buf_size);
		exit(EXIT_FAILURE);
	}
}

static void test_stream_tx_credit_bounds_server(const struct test_opts *opts)
{
	unsigned long long sock_buf_size;
	int fd;

	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
	if (fd < 0) {
		perror("accept");
		exit(EXIT_FAILURE);
	}

	/* Advertise a large buffer; the client never sees the data read, so
	 * its tx credit is bounded only by its own (small) buffer size.
	 */
	sock_buf_size = SOCK_BUF_SIZE;

	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");

	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			     sock_buf_size,
			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");

	control_writeln("SRVREADY");
	control_expectln("CLIDONE");

	close(fd);
}

static struct test_case test_cases[] = {
	{
		.name = "SOCK_STREAM connection reset",
		.run_client = test_stream_connection_reset,
	},
	{
		.name = "SOCK_STREAM bind only",
		.run_client = test_stream_bind_only_client,
		.run_server =
test_stream_bind_only_server, 2359 }, 2360 { 2361 .name = "SOCK_STREAM client close", 2362 .run_client = test_stream_client_close_client, 2363 .run_server = test_stream_client_close_server, 2364 }, 2365 { 2366 .name = "SOCK_STREAM server close", 2367 .run_client = test_stream_server_close_client, 2368 .run_server = test_stream_server_close_server, 2369 }, 2370 { 2371 .name = "SOCK_STREAM multiple connections", 2372 .run_client = test_stream_multiconn_client, 2373 .run_server = test_stream_multiconn_server, 2374 }, 2375 { 2376 .name = "SOCK_STREAM MSG_PEEK", 2377 .run_client = test_stream_msg_peek_client, 2378 .run_server = test_stream_msg_peek_server, 2379 }, 2380 { 2381 .name = "SOCK_SEQPACKET msg bounds", 2382 .run_client = test_seqpacket_msg_bounds_client, 2383 .run_server = test_seqpacket_msg_bounds_server, 2384 }, 2385 { 2386 .name = "SOCK_SEQPACKET MSG_TRUNC flag", 2387 .run_client = test_seqpacket_msg_trunc_client, 2388 .run_server = test_seqpacket_msg_trunc_server, 2389 }, 2390 { 2391 .name = "SOCK_SEQPACKET timeout", 2392 .run_client = test_seqpacket_timeout_client, 2393 .run_server = test_seqpacket_timeout_server, 2394 }, 2395 { 2396 .name = "SOCK_SEQPACKET invalid receive buffer", 2397 .run_client = test_seqpacket_invalid_rec_buffer_client, 2398 .run_server = test_seqpacket_invalid_rec_buffer_server, 2399 }, 2400 { 2401 .name = "SOCK_STREAM poll() + SO_RCVLOWAT", 2402 .run_client = test_stream_poll_rcvlowat_client, 2403 .run_server = test_stream_poll_rcvlowat_server, 2404 }, 2405 { 2406 .name = "SOCK_SEQPACKET big message", 2407 .run_client = test_seqpacket_bigmsg_client, 2408 .run_server = test_seqpacket_bigmsg_server, 2409 }, 2410 { 2411 .name = "SOCK_STREAM test invalid buffer", 2412 .run_client = test_stream_inv_buf_client, 2413 .run_server = test_stream_inv_buf_server, 2414 }, 2415 { 2416 .name = "SOCK_SEQPACKET test invalid buffer", 2417 .run_client = test_seqpacket_inv_buf_client, 2418 .run_server = test_seqpacket_inv_buf_server, 2419 }, 2420 { 
2421 .name = "SOCK_STREAM virtio skb merge", 2422 .run_client = test_stream_virtio_skb_merge_client, 2423 .run_server = test_stream_virtio_skb_merge_server, 2424 }, 2425 { 2426 .name = "SOCK_SEQPACKET MSG_PEEK", 2427 .run_client = test_seqpacket_msg_peek_client, 2428 .run_server = test_seqpacket_msg_peek_server, 2429 }, 2430 { 2431 .name = "SOCK_STREAM SHUT_WR", 2432 .run_client = test_stream_shutwr_client, 2433 .run_server = test_stream_shutwr_server, 2434 }, 2435 { 2436 .name = "SOCK_STREAM SHUT_RD", 2437 .run_client = test_stream_shutrd_client, 2438 .run_server = test_stream_shutrd_server, 2439 }, 2440 { 2441 .name = "SOCK_STREAM MSG_ZEROCOPY", 2442 .run_client = test_stream_msgzcopy_client, 2443 .run_server = test_stream_msgzcopy_server, 2444 }, 2445 { 2446 .name = "SOCK_SEQPACKET MSG_ZEROCOPY", 2447 .run_client = test_seqpacket_msgzcopy_client, 2448 .run_server = test_seqpacket_msgzcopy_server, 2449 }, 2450 { 2451 .name = "SOCK_STREAM MSG_ZEROCOPY empty MSG_ERRQUEUE", 2452 .run_client = test_stream_msgzcopy_empty_errq_client, 2453 .run_server = test_stream_msgzcopy_empty_errq_server, 2454 }, 2455 { 2456 .name = "SOCK_STREAM double bind connect", 2457 .run_client = test_double_bind_connect_client, 2458 .run_server = test_double_bind_connect_server, 2459 }, 2460 { 2461 .name = "SOCK_STREAM virtio credit update + SO_RCVLOWAT", 2462 .run_client = test_stream_rcvlowat_def_cred_upd_client, 2463 .run_server = test_stream_cred_upd_on_set_rcvlowat, 2464 }, 2465 { 2466 .name = "SOCK_STREAM virtio credit update + low rx_bytes", 2467 .run_client = test_stream_rcvlowat_def_cred_upd_client, 2468 .run_server = test_stream_cred_upd_on_low_rx_bytes, 2469 }, 2470 { 2471 .name = "SOCK_STREAM ioctl(SIOCOUTQ) 0 unsent bytes", 2472 .run_client = test_stream_unsent_bytes_client, 2473 .run_server = test_stream_unsent_bytes_server, 2474 }, 2475 { 2476 .name = "SOCK_SEQPACKET ioctl(SIOCOUTQ) 0 unsent bytes", 2477 .run_client = test_seqpacket_unsent_bytes_client, 2478 .run_server = 
test_seqpacket_unsent_bytes_server, 2479 }, 2480 { 2481 .name = "SOCK_STREAM leak accept queue", 2482 .run_client = test_stream_leak_acceptq_client, 2483 .run_server = test_stream_leak_acceptq_server, 2484 }, 2485 { 2486 .name = "SOCK_STREAM MSG_ZEROCOPY leak MSG_ERRQUEUE", 2487 .run_client = test_stream_msgzcopy_leak_errq_client, 2488 .run_server = test_stream_msgzcopy_leak_errq_server, 2489 }, 2490 { 2491 .name = "SOCK_STREAM MSG_ZEROCOPY leak completion skb", 2492 .run_client = test_stream_msgzcopy_leak_zcskb_client, 2493 .run_server = test_stream_msgzcopy_leak_zcskb_server, 2494 }, 2495 { 2496 .name = "SOCK_STREAM transport release use-after-free", 2497 .run_client = test_stream_transport_uaf_client, 2498 }, 2499 { 2500 .name = "SOCK_STREAM retry failed connect()", 2501 .run_client = test_stream_connect_retry_client, 2502 .run_server = test_stream_connect_retry_server, 2503 }, 2504 { 2505 .name = "SOCK_STREAM SO_LINGER null-ptr-deref", 2506 .run_client = test_stream_linger_client, 2507 .run_server = test_stream_linger_server, 2508 }, 2509 { 2510 .name = "SOCK_STREAM SO_LINGER close() on unread", 2511 .run_client = test_stream_nolinger_client, 2512 .run_server = test_stream_nolinger_server, 2513 }, 2514 { 2515 .name = "SOCK_STREAM transport change null-ptr-deref, lockdep warn", 2516 .run_client = test_stream_transport_change_client, 2517 .run_server = test_stream_transport_change_server, 2518 }, 2519 { 2520 .name = "SOCK_STREAM ioctl(SIOCINQ) functionality", 2521 .run_client = test_stream_unread_bytes_client, 2522 .run_server = test_stream_unread_bytes_server, 2523 }, 2524 { 2525 .name = "SOCK_SEQPACKET ioctl(SIOCINQ) functionality", 2526 .run_client = test_seqpacket_unread_bytes_client, 2527 .run_server = test_seqpacket_unread_bytes_server, 2528 }, 2529 { 2530 .name = "SOCK_STREAM accept()ed socket custom setsockopt()", 2531 .run_client = test_stream_accepted_setsockopt_client, 2532 .run_server = test_stream_accepted_setsockopt_server, 2533 }, 2534 { 2535 .name 
= "SOCK_STREAM virtio MSG_ZEROCOPY coalescence corruption", 2536 .run_client = test_stream_msgzcopy_mangle_client, 2537 .run_server = test_stream_msgzcopy_mangle_server, 2538 }, 2539 { 2540 .name = "SOCK_STREAM TX credit bounds", 2541 .run_client = test_stream_tx_credit_bounds_client, 2542 .run_server = test_stream_tx_credit_bounds_server, 2543 }, 2544 { 2545 .name = "SOCK_STREAM MSG_PEEK after partial recv", 2546 .run_client = test_stream_msg_peek_client, 2547 .run_server = test_stream_peek_after_recv_server, 2548 }, 2549 {}, 2550 }; 2551 2552 static const char optstring[] = ""; 2553 static const struct option longopts[] = { 2554 { 2555 .name = "control-host", 2556 .has_arg = required_argument, 2557 .val = 'H', 2558 }, 2559 { 2560 .name = "control-port", 2561 .has_arg = required_argument, 2562 .val = 'P', 2563 }, 2564 { 2565 .name = "mode", 2566 .has_arg = required_argument, 2567 .val = 'm', 2568 }, 2569 { 2570 .name = "peer-cid", 2571 .has_arg = required_argument, 2572 .val = 'p', 2573 }, 2574 { 2575 .name = "peer-port", 2576 .has_arg = required_argument, 2577 .val = 'q', 2578 }, 2579 { 2580 .name = "list", 2581 .has_arg = no_argument, 2582 .val = 'l', 2583 }, 2584 { 2585 .name = "skip", 2586 .has_arg = required_argument, 2587 .val = 's', 2588 }, 2589 { 2590 .name = "pick", 2591 .has_arg = required_argument, 2592 .val = 't', 2593 }, 2594 { 2595 .name = "help", 2596 .has_arg = no_argument, 2597 .val = '?', 2598 }, 2599 {}, 2600 }; 2601 2602 static void usage(void) 2603 { 2604 fprintf(stderr, "Usage: vsock_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid> [--peer-port=<port>] [--list] [--skip=<test_id>]\n" 2605 "\n" 2606 " Server: vsock_test --control-port=1234 --mode=server --peer-cid=3\n" 2607 " Client: vsock_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n" 2608 "\n" 2609 "Run vsock.ko tests. Must be launched in both guest\n" 2610 "and host. 
One side must use --mode=client and\n" 2611 "the other side must use --mode=server.\n" 2612 "\n" 2613 "A TCP control socket connection is used to coordinate tests\n" 2614 "between the client and the server. The server requires a\n" 2615 "listen address and the client requires an address to\n" 2616 "connect to.\n" 2617 "\n" 2618 "The CID of the other side must be given with --peer-cid=<cid>.\n" 2619 "During the test, two AF_VSOCK ports will be used: the port\n" 2620 "specified with --peer-port=<port> (or the default port)\n" 2621 "and the next one.\n" 2622 "\n" 2623 "Options:\n" 2624 " --help This help message\n" 2625 " --control-host <host> Server IP address to connect to\n" 2626 " --control-port <port> Server port to listen on/connect to\n" 2627 " --mode client|server Server or client mode\n" 2628 " --peer-cid <cid> CID of the other side\n" 2629 " --peer-port <port> AF_VSOCK port used for the test [default: %d]\n" 2630 " --list List of tests that will be executed\n" 2631 " --pick <test_id> Test ID to execute selectively;\n" 2632 " use multiple --pick options to select more tests\n" 2633 " --skip <test_id> Test ID to skip;\n" 2634 " use multiple --skip options to skip more tests\n", 2635 DEFAULT_PEER_PORT 2636 ); 2637 exit(EXIT_FAILURE); 2638 } 2639 2640 int main(int argc, char **argv) 2641 { 2642 const char *control_host = NULL; 2643 const char *control_port = NULL; 2644 struct test_opts opts = { 2645 .mode = TEST_MODE_UNSET, 2646 .peer_cid = VMADDR_CID_ANY, 2647 .peer_port = DEFAULT_PEER_PORT, 2648 }; 2649 2650 srand(time(NULL)); 2651 init_signals(); 2652 2653 for (;;) { 2654 int opt = getopt_long(argc, argv, optstring, longopts, NULL); 2655 2656 if (opt == -1) 2657 break; 2658 2659 switch (opt) { 2660 case 'H': 2661 control_host = optarg; 2662 break; 2663 case 'm': 2664 if (strcmp(optarg, "client") == 0) 2665 opts.mode = TEST_MODE_CLIENT; 2666 else if (strcmp(optarg, "server") == 0) 2667 opts.mode = TEST_MODE_SERVER; 2668 else { 2669 fprintf(stderr, "--mode must 
be \"client\" or \"server\"\n"); 2670 return EXIT_FAILURE; 2671 } 2672 break; 2673 case 'p': 2674 opts.peer_cid = parse_cid(optarg); 2675 break; 2676 case 'q': 2677 opts.peer_port = parse_port(optarg); 2678 break; 2679 case 'P': 2680 control_port = optarg; 2681 break; 2682 case 'l': 2683 list_tests(test_cases); 2684 break; 2685 case 's': 2686 skip_test(test_cases, ARRAY_SIZE(test_cases) - 1, 2687 optarg); 2688 break; 2689 case 't': 2690 pick_test(test_cases, ARRAY_SIZE(test_cases) - 1, 2691 optarg); 2692 break; 2693 case '?': 2694 default: 2695 usage(); 2696 } 2697 } 2698 2699 if (!control_port) 2700 usage(); 2701 if (opts.mode == TEST_MODE_UNSET) 2702 usage(); 2703 if (opts.peer_cid == VMADDR_CID_ANY) 2704 usage(); 2705 2706 if (!control_host) { 2707 if (opts.mode != TEST_MODE_SERVER) 2708 usage(); 2709 control_host = "0.0.0.0"; 2710 } 2711 2712 control_init(control_host, control_port, 2713 opts.mode == TEST_MODE_SERVER); 2714 2715 run_tests(test_cases, &opts); 2716 2717 control_cleanup(); 2718 return EXIT_SUCCESS; 2719 } 2720