// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. - Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for lookup of sockets when packets
 * are received and that's not necessary for SOCK_DGRAM sockets since we create
 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets will reduce the chance of collisions
 * when looking for SOCK_STREAM sockets and prevents us from having to check the
 * socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state. When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket. These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection. If it does, we process the packet for the
 * pending socket. When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue. Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue. If the socket cannot be accepted
 * for some reason then it is marked rejected. Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request. Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established. This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is cleaned
 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
 * function will also cleanup rejected sockets, those that reach the connected
 * state but leave it before they have been accepted.
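 *
 * For illustration, a rough sketch of how a transport might arm that cleanup
 * when it creates a pending socket (not taken verbatim from any transport;
 * the exact call sites and the one-second timeout are assumptions):
 *
 *     vsock_add_pending(listener, pending);
 *     vsock_sk(pending)->listener = listener;
 *     sock_hold(pending);
 *     schedule_delayed_work(&vsock_sk(pending)->pending_work, HZ);
 *
 * vsock_pending_work() then either tears down a socket that is still pending
 * (it timed out) or, if it was accepted or rejected in the meantime, simply
 * drops the references taken above.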
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked. Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed. Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) ensures a
 * reference. When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *     TCP_CLOSE - unconnected
 *     TCP_SYN_SENT - connecting
 *     TCP_ESTABLISHED - connected
 *     TCP_CLOSING - disconnecting
 *     TCP_LISTEN - listening
 */

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/errqueue.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#include <uapi/linux/vm_sockets.h>
#include <uapi/asm-generic/ioctls.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
static void vsock_close(struct sock *sk, long timeout);

/* Protocol family. */
struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
	.close = vsock_close,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot = vsock_bpf_update_proto,
#endif
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
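 *
 * (Illustrative only: userspace can override this default per socket with
 * SO_VM_SOCKETS_CONNECT_TIMEOUT, handled by vsock_connectible_setsockopt()
 * below. A minimal sketch, assuming the struct timeval flavour of the option
 * and an already-created AF_VSOCK socket fd:
 *
 *     struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *     setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
 *                &tv, sizeof(tv));
 * )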
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128

/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;
/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;
static DEFINE_MUTEX(vsock_register_mutex);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES 24

#define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);

/* Autobind this socket to the local address if necessary.
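 *
 * (Illustrative only: this is what lets an application skip bind(2); a
 * userspace sketch, with error handling omitted and a listener on host
 * port 1234 assumed:
 *
 *     int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *     struct sockaddr_vm addr = {
 *             .svm_family = AF_VSOCK,
 *             .svm_cid = VMADDR_CID_HOST,
 *             .svm_port = 1234,
 *     };
 *
 *     connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * The connect path calls vsock_auto_bind(), which binds the socket to
 * VMADDR_CID_ANY / VMADDR_PORT_ANY so that __vsock_bind() picks the local CID
 * and an ephemeral port above LAST_RESERVED_PORT.
 * )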
 */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
		if (vsock_addr_equals_addr(addr, &vsk->local_addr))
			return sk_vsock(vsk);

		if (addr->svm_port == vsk->local_addr.svm_port &&
		    (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
		     addr->svm_cid == VMADDR_CID_ANY))
			return sk_vsock(vsk);
	}

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_bound_table(vsk))
		__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_connected_table(vsk))
		__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock
*vsock_find_connected_socket(struct sockaddr_vm *src, 323 struct sockaddr_vm *dst) 324 { 325 struct sock *sk; 326 327 spin_lock_bh(&vsock_table_lock); 328 sk = __vsock_find_connected_socket(src, dst); 329 if (sk) 330 sock_hold(sk); 331 332 spin_unlock_bh(&vsock_table_lock); 333 334 return sk; 335 } 336 EXPORT_SYMBOL_GPL(vsock_find_connected_socket); 337 338 void vsock_remove_sock(struct vsock_sock *vsk) 339 { 340 vsock_remove_bound(vsk); 341 vsock_remove_connected(vsk); 342 } 343 EXPORT_SYMBOL_GPL(vsock_remove_sock); 344 345 void vsock_for_each_connected_socket(struct vsock_transport *transport, 346 void (*fn)(struct sock *sk)) 347 { 348 int i; 349 350 spin_lock_bh(&vsock_table_lock); 351 352 for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { 353 struct vsock_sock *vsk; 354 list_for_each_entry(vsk, &vsock_connected_table[i], 355 connected_table) { 356 if (vsk->transport != transport) 357 continue; 358 359 fn(sk_vsock(vsk)); 360 } 361 } 362 363 spin_unlock_bh(&vsock_table_lock); 364 } 365 EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket); 366 367 void vsock_add_pending(struct sock *listener, struct sock *pending) 368 { 369 struct vsock_sock *vlistener; 370 struct vsock_sock *vpending; 371 372 vlistener = vsock_sk(listener); 373 vpending = vsock_sk(pending); 374 375 sock_hold(pending); 376 sock_hold(listener); 377 list_add_tail(&vpending->pending_links, &vlistener->pending_links); 378 } 379 EXPORT_SYMBOL_GPL(vsock_add_pending); 380 381 void vsock_remove_pending(struct sock *listener, struct sock *pending) 382 { 383 struct vsock_sock *vpending = vsock_sk(pending); 384 385 list_del_init(&vpending->pending_links); 386 sock_put(listener); 387 sock_put(pending); 388 } 389 EXPORT_SYMBOL_GPL(vsock_remove_pending); 390 391 void vsock_enqueue_accept(struct sock *listener, struct sock *connected) 392 { 393 struct vsock_sock *vlistener; 394 struct vsock_sock *vconnected; 395 396 vlistener = vsock_sk(listener); 397 vconnected = vsock_sk(connected); 398 399 sock_hold(connected); 400 sock_hold(listener); 401 list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue); 402 } 403 EXPORT_SYMBOL_GPL(vsock_enqueue_accept); 404 405 static bool vsock_use_local_transport(unsigned int remote_cid) 406 { 407 if (!transport_local) 408 return false; 409 410 if (remote_cid == VMADDR_CID_LOCAL) 411 return true; 412 413 if (transport_g2h) { 414 return remote_cid == transport_g2h->get_local_cid(); 415 } else { 416 return remote_cid == VMADDR_CID_HOST; 417 } 418 } 419 420 static void vsock_deassign_transport(struct vsock_sock *vsk) 421 { 422 if (!vsk->transport) 423 return; 424 425 vsk->transport->destruct(vsk); 426 module_put(vsk->transport->module); 427 vsk->transport = NULL; 428 } 429 430 /* Assign a transport to a socket and call the .init transport callback. 431 * 432 * Note: for connection oriented socket this must be called when vsk->remote_addr 433 * is set (e.g. during the connect() or when a connection request on a listener 434 * socket is received). 
435 * The vsk->remote_addr is used to decide which transport to use: 436 * - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if 437 * g2h is not loaded, will use local transport; 438 * - remote CID <= VMADDR_CID_HOST or h2g is not loaded or remote flags field 439 * includes VMADDR_FLAG_TO_HOST flag value, will use guest->host transport; 440 * - remote CID > VMADDR_CID_HOST will use host->guest transport; 441 */ 442 int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) 443 { 444 const struct vsock_transport *new_transport; 445 struct sock *sk = sk_vsock(vsk); 446 unsigned int remote_cid = vsk->remote_addr.svm_cid; 447 __u8 remote_flags; 448 int ret; 449 450 /* If the packet is coming with the source and destination CIDs higher 451 * than VMADDR_CID_HOST, then a vsock channel where all the packets are 452 * forwarded to the host should be established. Then the host will 453 * need to forward the packets to the guest. 454 * 455 * The flag is set on the (listen) receive path (psk is not NULL). On 456 * the connect path the flag can be set by the user space application. 457 */ 458 if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST && 459 vsk->remote_addr.svm_cid > VMADDR_CID_HOST) 460 vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST; 461 462 remote_flags = vsk->remote_addr.svm_flags; 463 464 switch (sk->sk_type) { 465 case SOCK_DGRAM: 466 new_transport = transport_dgram; 467 break; 468 case SOCK_STREAM: 469 case SOCK_SEQPACKET: 470 if (vsock_use_local_transport(remote_cid)) 471 new_transport = transport_local; 472 else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g || 473 (remote_flags & VMADDR_FLAG_TO_HOST)) 474 new_transport = transport_g2h; 475 else 476 new_transport = transport_h2g; 477 break; 478 default: 479 return -ESOCKTNOSUPPORT; 480 } 481 482 if (vsk->transport) { 483 if (vsk->transport == new_transport) 484 return 0; 485 486 /* transport->release() must be called with sock lock acquired. 487 * This path can only be taken during vsock_connect(), where we 488 * have already held the sock lock. In the other cases, this 489 * function is called on a new socket which is not assigned to 490 * any transport. 491 */ 492 vsk->transport->release(vsk); 493 vsock_deassign_transport(vsk); 494 } 495 496 /* We increase the module refcnt to prevent the transport unloading 497 * while there are open sockets assigned to it. 
498 */ 499 if (!new_transport || !try_module_get(new_transport->module)) 500 return -ENODEV; 501 502 if (sk->sk_type == SOCK_SEQPACKET) { 503 if (!new_transport->seqpacket_allow || 504 !new_transport->seqpacket_allow(remote_cid)) { 505 module_put(new_transport->module); 506 return -ESOCKTNOSUPPORT; 507 } 508 } 509 510 ret = new_transport->init(vsk, psk); 511 if (ret) { 512 module_put(new_transport->module); 513 return ret; 514 } 515 516 vsk->transport = new_transport; 517 518 return 0; 519 } 520 EXPORT_SYMBOL_GPL(vsock_assign_transport); 521 522 bool vsock_find_cid(unsigned int cid) 523 { 524 if (transport_g2h && cid == transport_g2h->get_local_cid()) 525 return true; 526 527 if (transport_h2g && cid == VMADDR_CID_HOST) 528 return true; 529 530 if (transport_local && cid == VMADDR_CID_LOCAL) 531 return true; 532 533 return false; 534 } 535 EXPORT_SYMBOL_GPL(vsock_find_cid); 536 537 static struct sock *vsock_dequeue_accept(struct sock *listener) 538 { 539 struct vsock_sock *vlistener; 540 struct vsock_sock *vconnected; 541 542 vlistener = vsock_sk(listener); 543 544 if (list_empty(&vlistener->accept_queue)) 545 return NULL; 546 547 vconnected = list_entry(vlistener->accept_queue.next, 548 struct vsock_sock, accept_queue); 549 550 list_del_init(&vconnected->accept_queue); 551 sock_put(listener); 552 /* The caller will need a reference on the connected socket so we let 553 * it call sock_put(). 554 */ 555 556 return sk_vsock(vconnected); 557 } 558 559 static bool vsock_is_accept_queue_empty(struct sock *sk) 560 { 561 struct vsock_sock *vsk = vsock_sk(sk); 562 return list_empty(&vsk->accept_queue); 563 } 564 565 static bool vsock_is_pending(struct sock *sk) 566 { 567 struct vsock_sock *vsk = vsock_sk(sk); 568 return !list_empty(&vsk->pending_links); 569 } 570 571 static int vsock_send_shutdown(struct sock *sk, int mode) 572 { 573 struct vsock_sock *vsk = vsock_sk(sk); 574 575 if (!vsk->transport) 576 return -ENODEV; 577 578 return vsk->transport->shutdown(vsk, mode); 579 } 580 581 static void vsock_pending_work(struct work_struct *work) 582 { 583 struct sock *sk; 584 struct sock *listener; 585 struct vsock_sock *vsk; 586 bool cleanup; 587 588 vsk = container_of(work, struct vsock_sock, pending_work.work); 589 sk = sk_vsock(vsk); 590 listener = vsk->listener; 591 cleanup = true; 592 593 lock_sock(listener); 594 lock_sock_nested(sk, SINGLE_DEPTH_NESTING); 595 596 if (vsock_is_pending(sk)) { 597 vsock_remove_pending(listener, sk); 598 599 sk_acceptq_removed(listener); 600 } else if (!vsk->rejected) { 601 /* We are not on the pending list and accept() did not reject 602 * us, so we must have been accepted by our user process. We 603 * just need to drop our references to the sockets and be on 604 * our way. 605 */ 606 cleanup = false; 607 goto out; 608 } 609 610 /* We need to remove ourself from the global connected sockets list so 611 * incoming packets can't find this socket, and to reduce the reference 612 * count. 
613 */ 614 vsock_remove_connected(vsk); 615 616 sk->sk_state = TCP_CLOSE; 617 618 out: 619 release_sock(sk); 620 release_sock(listener); 621 if (cleanup) 622 sock_put(sk); 623 624 sock_put(sk); 625 sock_put(listener); 626 } 627 628 /**** SOCKET OPERATIONS ****/ 629 630 static int __vsock_bind_connectible(struct vsock_sock *vsk, 631 struct sockaddr_vm *addr) 632 { 633 static u32 port; 634 struct sockaddr_vm new_addr; 635 636 if (!port) 637 port = get_random_u32_above(LAST_RESERVED_PORT); 638 639 vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port); 640 641 if (addr->svm_port == VMADDR_PORT_ANY) { 642 bool found = false; 643 unsigned int i; 644 645 for (i = 0; i < MAX_PORT_RETRIES; i++) { 646 if (port <= LAST_RESERVED_PORT) 647 port = LAST_RESERVED_PORT + 1; 648 649 new_addr.svm_port = port++; 650 651 if (!__vsock_find_bound_socket(&new_addr)) { 652 found = true; 653 break; 654 } 655 } 656 657 if (!found) 658 return -EADDRNOTAVAIL; 659 } else { 660 /* If port is in reserved range, ensure caller 661 * has necessary privileges. 662 */ 663 if (addr->svm_port <= LAST_RESERVED_PORT && 664 !capable(CAP_NET_BIND_SERVICE)) { 665 return -EACCES; 666 } 667 668 if (__vsock_find_bound_socket(&new_addr)) 669 return -EADDRINUSE; 670 } 671 672 vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port); 673 674 /* Remove connection oriented sockets from the unbound list and add them 675 * to the hash table for easy lookup by its address. The unbound list 676 * is simply an extra entry at the end of the hash table, a trick used 677 * by AF_UNIX. 678 */ 679 __vsock_remove_bound(vsk); 680 __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk); 681 682 return 0; 683 } 684 685 static int __vsock_bind_dgram(struct vsock_sock *vsk, 686 struct sockaddr_vm *addr) 687 { 688 return vsk->transport->dgram_bind(vsk, addr); 689 } 690 691 static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr) 692 { 693 struct vsock_sock *vsk = vsock_sk(sk); 694 int retval; 695 696 /* First ensure this socket isn't already bound. */ 697 if (vsock_addr_bound(&vsk->local_addr)) 698 return -EINVAL; 699 700 /* Now bind to the provided address or select appropriate values if 701 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that 702 * like AF_INET prevents binding to a non-local IP address (in most 703 * cases), we only allow binding to a local CID. 704 */ 705 if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid)) 706 return -EADDRNOTAVAIL; 707 708 switch (sk->sk_socket->type) { 709 case SOCK_STREAM: 710 case SOCK_SEQPACKET: 711 spin_lock_bh(&vsock_table_lock); 712 retval = __vsock_bind_connectible(vsk, addr); 713 spin_unlock_bh(&vsock_table_lock); 714 break; 715 716 case SOCK_DGRAM: 717 retval = __vsock_bind_dgram(vsk, addr); 718 break; 719 720 default: 721 retval = -EINVAL; 722 break; 723 } 724 725 return retval; 726 } 727 728 static void vsock_connect_timeout(struct work_struct *work); 729 730 static struct sock *__vsock_create(struct net *net, 731 struct socket *sock, 732 struct sock *parent, 733 gfp_t priority, 734 unsigned short type, 735 int kern) 736 { 737 struct sock *sk; 738 struct vsock_sock *psk; 739 struct vsock_sock *vsk; 740 741 sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern); 742 if (!sk) 743 return NULL; 744 745 sock_init_data(sock, sk); 746 747 /* sk->sk_type is normally set in sock_init_data, but only if sock is 748 * non-NULL. We make sure that our sockets always have a type by 749 * setting it here if needed. 
750 */ 751 if (!sock) 752 sk->sk_type = type; 753 754 vsk = vsock_sk(sk); 755 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 756 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 757 758 sk->sk_destruct = vsock_sk_destruct; 759 sk->sk_backlog_rcv = vsock_queue_rcv_skb; 760 sock_reset_flag(sk, SOCK_DONE); 761 762 INIT_LIST_HEAD(&vsk->bound_table); 763 INIT_LIST_HEAD(&vsk->connected_table); 764 vsk->listener = NULL; 765 INIT_LIST_HEAD(&vsk->pending_links); 766 INIT_LIST_HEAD(&vsk->accept_queue); 767 vsk->rejected = false; 768 vsk->sent_request = false; 769 vsk->ignore_connecting_rst = false; 770 vsk->peer_shutdown = 0; 771 INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout); 772 INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work); 773 774 psk = parent ? vsock_sk(parent) : NULL; 775 if (parent) { 776 vsk->trusted = psk->trusted; 777 vsk->owner = get_cred(psk->owner); 778 vsk->connect_timeout = psk->connect_timeout; 779 vsk->buffer_size = psk->buffer_size; 780 vsk->buffer_min_size = psk->buffer_min_size; 781 vsk->buffer_max_size = psk->buffer_max_size; 782 security_sk_clone(parent, sk); 783 } else { 784 vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN); 785 vsk->owner = get_current_cred(); 786 vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT; 787 vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE; 788 vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE; 789 vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE; 790 } 791 792 return sk; 793 } 794 795 static bool sock_type_connectible(u16 type) 796 { 797 return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET); 798 } 799 800 static void __vsock_release(struct sock *sk, int level) 801 { 802 struct vsock_sock *vsk; 803 struct sock *pending; 804 805 vsk = vsock_sk(sk); 806 pending = NULL; /* Compiler warning. */ 807 808 /* When "level" is SINGLE_DEPTH_NESTING, use the nested 809 * version to avoid the warning "possible recursive locking 810 * detected". When "level" is 0, lock_sock_nested(sk, level) 811 * is the same as lock_sock(sk). 812 */ 813 lock_sock_nested(sk, level); 814 815 if (vsk->transport) 816 vsk->transport->release(vsk); 817 else if (sock_type_connectible(sk->sk_type)) 818 vsock_remove_sock(vsk); 819 820 sock_orphan(sk); 821 sk->sk_shutdown = SHUTDOWN_MASK; 822 823 skb_queue_purge(&sk->sk_receive_queue); 824 825 /* Clean up any sockets that never were accepted. */ 826 while ((pending = vsock_dequeue_accept(sk)) != NULL) { 827 __vsock_release(pending, SINGLE_DEPTH_NESTING); 828 sock_put(pending); 829 } 830 831 release_sock(sk); 832 sock_put(sk); 833 } 834 835 static void vsock_sk_destruct(struct sock *sk) 836 { 837 struct vsock_sock *vsk = vsock_sk(sk); 838 839 /* Flush MSG_ZEROCOPY leftovers. */ 840 __skb_queue_purge(&sk->sk_error_queue); 841 842 vsock_deassign_transport(vsk); 843 844 /* When clearing these addresses, there's no need to set the family and 845 * possibly register the address family with the kernel. 
846 */ 847 vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 848 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY); 849 850 put_cred(vsk->owner); 851 } 852 853 static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 854 { 855 int err; 856 857 err = sock_queue_rcv_skb(sk, skb); 858 if (err) 859 kfree_skb(skb); 860 861 return err; 862 } 863 864 struct sock *vsock_create_connected(struct sock *parent) 865 { 866 return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL, 867 parent->sk_type, 0); 868 } 869 EXPORT_SYMBOL_GPL(vsock_create_connected); 870 871 s64 vsock_stream_has_data(struct vsock_sock *vsk) 872 { 873 return vsk->transport->stream_has_data(vsk); 874 } 875 EXPORT_SYMBOL_GPL(vsock_stream_has_data); 876 877 s64 vsock_connectible_has_data(struct vsock_sock *vsk) 878 { 879 struct sock *sk = sk_vsock(vsk); 880 881 if (sk->sk_type == SOCK_SEQPACKET) 882 return vsk->transport->seqpacket_has_data(vsk); 883 else 884 return vsock_stream_has_data(vsk); 885 } 886 EXPORT_SYMBOL_GPL(vsock_connectible_has_data); 887 888 s64 vsock_stream_has_space(struct vsock_sock *vsk) 889 { 890 return vsk->transport->stream_has_space(vsk); 891 } 892 EXPORT_SYMBOL_GPL(vsock_stream_has_space); 893 894 void vsock_data_ready(struct sock *sk) 895 { 896 struct vsock_sock *vsk = vsock_sk(sk); 897 898 if (vsock_stream_has_data(vsk) >= sk->sk_rcvlowat || 899 sock_flag(sk, SOCK_DONE)) 900 sk->sk_data_ready(sk); 901 } 902 EXPORT_SYMBOL_GPL(vsock_data_ready); 903 904 /* Dummy callback required by sockmap. 905 * See unconditional call of saved_close() in sock_map_close(). 906 */ 907 static void vsock_close(struct sock *sk, long timeout) 908 { 909 } 910 911 static int vsock_release(struct socket *sock) 912 { 913 struct sock *sk = sock->sk; 914 915 if (!sk) 916 return 0; 917 918 sk->sk_prot->close(sk, 0); 919 __vsock_release(sk, 0); 920 sock->sk = NULL; 921 sock->state = SS_FREE; 922 923 return 0; 924 } 925 926 static int 927 vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 928 { 929 int err; 930 struct sock *sk; 931 struct sockaddr_vm *vm_addr; 932 933 sk = sock->sk; 934 935 if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0) 936 return -EINVAL; 937 938 lock_sock(sk); 939 err = __vsock_bind(sk, vm_addr); 940 release_sock(sk); 941 942 return err; 943 } 944 945 static int vsock_getname(struct socket *sock, 946 struct sockaddr *addr, int peer) 947 { 948 int err; 949 struct sock *sk; 950 struct vsock_sock *vsk; 951 struct sockaddr_vm *vm_addr; 952 953 sk = sock->sk; 954 vsk = vsock_sk(sk); 955 err = 0; 956 957 lock_sock(sk); 958 959 if (peer) { 960 if (sock->state != SS_CONNECTED) { 961 err = -ENOTCONN; 962 goto out; 963 } 964 vm_addr = &vsk->remote_addr; 965 } else { 966 vm_addr = &vsk->local_addr; 967 } 968 969 if (!vm_addr) { 970 err = -EINVAL; 971 goto out; 972 } 973 974 /* sys_getsockname() and sys_getpeername() pass us a 975 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately 976 * that macro is defined in socket.c instead of .h, so we hardcode its 977 * value here. 
978 */ 979 BUILD_BUG_ON(sizeof(*vm_addr) > 128); 980 memcpy(addr, vm_addr, sizeof(*vm_addr)); 981 err = sizeof(*vm_addr); 982 983 out: 984 release_sock(sk); 985 return err; 986 } 987 988 static int vsock_shutdown(struct socket *sock, int mode) 989 { 990 int err; 991 struct sock *sk; 992 993 /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses 994 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode 995 * here like the other address families do. Note also that the 996 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3), 997 * which is what we want. 998 */ 999 mode++; 1000 1001 if ((mode & ~SHUTDOWN_MASK) || !mode) 1002 return -EINVAL; 1003 1004 /* If this is a connection oriented socket and it is not connected then 1005 * bail out immediately. If it is a DGRAM socket then we must first 1006 * kick the socket so that it wakes up from any sleeping calls, for 1007 * example recv(), and then afterwards return the error. 1008 */ 1009 1010 sk = sock->sk; 1011 1012 lock_sock(sk); 1013 if (sock->state == SS_UNCONNECTED) { 1014 err = -ENOTCONN; 1015 if (sock_type_connectible(sk->sk_type)) 1016 goto out; 1017 } else { 1018 sock->state = SS_DISCONNECTING; 1019 err = 0; 1020 } 1021 1022 /* Receive and send shutdowns are treated alike. */ 1023 mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN); 1024 if (mode) { 1025 sk->sk_shutdown |= mode; 1026 sk->sk_state_change(sk); 1027 1028 if (sock_type_connectible(sk->sk_type)) { 1029 sock_reset_flag(sk, SOCK_DONE); 1030 vsock_send_shutdown(sk, mode); 1031 } 1032 } 1033 1034 out: 1035 release_sock(sk); 1036 return err; 1037 } 1038 1039 static __poll_t vsock_poll(struct file *file, struct socket *sock, 1040 poll_table *wait) 1041 { 1042 struct sock *sk; 1043 __poll_t mask; 1044 struct vsock_sock *vsk; 1045 1046 sk = sock->sk; 1047 vsk = vsock_sk(sk); 1048 1049 poll_wait(file, sk_sleep(sk), wait); 1050 mask = 0; 1051 1052 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) 1053 /* Signify that there has been an error on this socket. */ 1054 mask |= EPOLLERR; 1055 1056 /* INET sockets treat local write shutdown and peer write shutdown as a 1057 * case of EPOLLHUP set. 1058 */ 1059 if ((sk->sk_shutdown == SHUTDOWN_MASK) || 1060 ((sk->sk_shutdown & SEND_SHUTDOWN) && 1061 (vsk->peer_shutdown & SEND_SHUTDOWN))) { 1062 mask |= EPOLLHUP; 1063 } 1064 1065 if (sk->sk_shutdown & RCV_SHUTDOWN || 1066 vsk->peer_shutdown & SEND_SHUTDOWN) { 1067 mask |= EPOLLRDHUP; 1068 } 1069 1070 if (sk_is_readable(sk)) 1071 mask |= EPOLLIN | EPOLLRDNORM; 1072 1073 if (sock->type == SOCK_DGRAM) { 1074 /* For datagram sockets we can read if there is something in 1075 * the queue and write as long as the socket isn't shutdown for 1076 * sending. 1077 */ 1078 if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || 1079 (sk->sk_shutdown & RCV_SHUTDOWN)) { 1080 mask |= EPOLLIN | EPOLLRDNORM; 1081 } 1082 1083 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 1084 mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; 1085 1086 } else if (sock_type_connectible(sk->sk_type)) { 1087 const struct vsock_transport *transport; 1088 1089 lock_sock(sk); 1090 1091 transport = vsk->transport; 1092 1093 /* Listening sockets that have connections in their accept 1094 * queue can be read. 1095 */ 1096 if (sk->sk_state == TCP_LISTEN 1097 && !vsock_is_accept_queue_empty(sk)) 1098 mask |= EPOLLIN | EPOLLRDNORM; 1099 1100 /* If there is something in the queue then we can read. 
*/ 1101 if (transport && transport->stream_is_active(vsk) && 1102 !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1103 bool data_ready_now = false; 1104 int target = sock_rcvlowat(sk, 0, INT_MAX); 1105 int ret = transport->notify_poll_in( 1106 vsk, target, &data_ready_now); 1107 if (ret < 0) { 1108 mask |= EPOLLERR; 1109 } else { 1110 if (data_ready_now) 1111 mask |= EPOLLIN | EPOLLRDNORM; 1112 1113 } 1114 } 1115 1116 /* Sockets whose connections have been closed, reset, or 1117 * terminated should also be considered read, and we check the 1118 * shutdown flag for that. 1119 */ 1120 if (sk->sk_shutdown & RCV_SHUTDOWN || 1121 vsk->peer_shutdown & SEND_SHUTDOWN) { 1122 mask |= EPOLLIN | EPOLLRDNORM; 1123 } 1124 1125 /* Connected sockets that can produce data can be written. */ 1126 if (transport && sk->sk_state == TCP_ESTABLISHED) { 1127 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 1128 bool space_avail_now = false; 1129 int ret = transport->notify_poll_out( 1130 vsk, 1, &space_avail_now); 1131 if (ret < 0) { 1132 mask |= EPOLLERR; 1133 } else { 1134 if (space_avail_now) 1135 /* Remove EPOLLWRBAND since INET 1136 * sockets are not setting it. 1137 */ 1138 mask |= EPOLLOUT | EPOLLWRNORM; 1139 1140 } 1141 } 1142 } 1143 1144 /* Simulate INET socket poll behaviors, which sets 1145 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read, 1146 * but local send is not shutdown. 1147 */ 1148 if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) { 1149 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 1150 mask |= EPOLLOUT | EPOLLWRNORM; 1151 1152 } 1153 1154 release_sock(sk); 1155 } 1156 1157 return mask; 1158 } 1159 1160 static int vsock_read_skb(struct sock *sk, skb_read_actor_t read_actor) 1161 { 1162 struct vsock_sock *vsk = vsock_sk(sk); 1163 1164 return vsk->transport->read_skb(vsk, read_actor); 1165 } 1166 1167 static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg, 1168 size_t len) 1169 { 1170 int err; 1171 struct sock *sk; 1172 struct vsock_sock *vsk; 1173 struct sockaddr_vm *remote_addr; 1174 const struct vsock_transport *transport; 1175 1176 if (msg->msg_flags & MSG_OOB) 1177 return -EOPNOTSUPP; 1178 1179 /* For now, MSG_DONTWAIT is always assumed... */ 1180 err = 0; 1181 sk = sock->sk; 1182 vsk = vsock_sk(sk); 1183 1184 lock_sock(sk); 1185 1186 transport = vsk->transport; 1187 1188 err = vsock_auto_bind(vsk); 1189 if (err) 1190 goto out; 1191 1192 1193 /* If the provided message contains an address, use that. Otherwise 1194 * fall back on the socket's remote handle (if it has been connected). 1195 */ 1196 if (msg->msg_name && 1197 vsock_addr_cast(msg->msg_name, msg->msg_namelen, 1198 &remote_addr) == 0) { 1199 /* Ensure this address is of the right type and is a valid 1200 * destination. 1201 */ 1202 1203 if (remote_addr->svm_cid == VMADDR_CID_ANY) 1204 remote_addr->svm_cid = transport->get_local_cid(); 1205 1206 if (!vsock_addr_bound(remote_addr)) { 1207 err = -EINVAL; 1208 goto out; 1209 } 1210 } else if (sock->state == SS_CONNECTED) { 1211 remote_addr = &vsk->remote_addr; 1212 1213 if (remote_addr->svm_cid == VMADDR_CID_ANY) 1214 remote_addr->svm_cid = transport->get_local_cid(); 1215 1216 /* XXX Should connect() or this function ensure remote_addr is 1217 * bound? 
1218 */ 1219 if (!vsock_addr_bound(&vsk->remote_addr)) { 1220 err = -EINVAL; 1221 goto out; 1222 } 1223 } else { 1224 err = -EINVAL; 1225 goto out; 1226 } 1227 1228 if (!transport->dgram_allow(remote_addr->svm_cid, 1229 remote_addr->svm_port)) { 1230 err = -EINVAL; 1231 goto out; 1232 } 1233 1234 err = transport->dgram_enqueue(vsk, remote_addr, msg, len); 1235 1236 out: 1237 release_sock(sk); 1238 return err; 1239 } 1240 1241 static int vsock_dgram_connect(struct socket *sock, 1242 struct sockaddr *addr, int addr_len, int flags) 1243 { 1244 int err; 1245 struct sock *sk; 1246 struct vsock_sock *vsk; 1247 struct sockaddr_vm *remote_addr; 1248 1249 sk = sock->sk; 1250 vsk = vsock_sk(sk); 1251 1252 err = vsock_addr_cast(addr, addr_len, &remote_addr); 1253 if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) { 1254 lock_sock(sk); 1255 vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, 1256 VMADDR_PORT_ANY); 1257 sock->state = SS_UNCONNECTED; 1258 release_sock(sk); 1259 return 0; 1260 } else if (err != 0) 1261 return -EINVAL; 1262 1263 lock_sock(sk); 1264 1265 err = vsock_auto_bind(vsk); 1266 if (err) 1267 goto out; 1268 1269 if (!vsk->transport->dgram_allow(remote_addr->svm_cid, 1270 remote_addr->svm_port)) { 1271 err = -EINVAL; 1272 goto out; 1273 } 1274 1275 memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr)); 1276 sock->state = SS_CONNECTED; 1277 1278 /* sock map disallows redirection of non-TCP sockets with sk_state != 1279 * TCP_ESTABLISHED (see sock_map_redirect_allowed()), so we set 1280 * TCP_ESTABLISHED here to allow redirection of connected vsock dgrams. 1281 * 1282 * This doesn't seem to be abnormal state for datagram sockets, as the 1283 * same approach can be see in other datagram socket types as well 1284 * (such as unix sockets). 
1285 */ 1286 sk->sk_state = TCP_ESTABLISHED; 1287 1288 out: 1289 release_sock(sk); 1290 return err; 1291 } 1292 1293 int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, 1294 size_t len, int flags) 1295 { 1296 struct sock *sk = sock->sk; 1297 struct vsock_sock *vsk = vsock_sk(sk); 1298 1299 return vsk->transport->dgram_dequeue(vsk, msg, len, flags); 1300 } 1301 1302 int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg, 1303 size_t len, int flags) 1304 { 1305 #ifdef CONFIG_BPF_SYSCALL 1306 struct sock *sk = sock->sk; 1307 const struct proto *prot; 1308 1309 prot = READ_ONCE(sk->sk_prot); 1310 if (prot != &vsock_proto) 1311 return prot->recvmsg(sk, msg, len, flags, NULL); 1312 #endif 1313 1314 return __vsock_dgram_recvmsg(sock, msg, len, flags); 1315 } 1316 EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg); 1317 1318 static int vsock_do_ioctl(struct socket *sock, unsigned int cmd, 1319 int __user *arg) 1320 { 1321 struct sock *sk = sock->sk; 1322 struct vsock_sock *vsk; 1323 int ret; 1324 1325 vsk = vsock_sk(sk); 1326 1327 switch (cmd) { 1328 case SIOCOUTQ: { 1329 ssize_t n_bytes; 1330 1331 if (!vsk->transport || !vsk->transport->unsent_bytes) { 1332 ret = -EOPNOTSUPP; 1333 break; 1334 } 1335 1336 if (sock_type_connectible(sk->sk_type) && sk->sk_state == TCP_LISTEN) { 1337 ret = -EINVAL; 1338 break; 1339 } 1340 1341 n_bytes = vsk->transport->unsent_bytes(vsk); 1342 if (n_bytes < 0) { 1343 ret = n_bytes; 1344 break; 1345 } 1346 1347 ret = put_user(n_bytes, arg); 1348 break; 1349 } 1350 default: 1351 ret = -ENOIOCTLCMD; 1352 } 1353 1354 return ret; 1355 } 1356 1357 static int vsock_ioctl(struct socket *sock, unsigned int cmd, 1358 unsigned long arg) 1359 { 1360 int ret; 1361 1362 lock_sock(sock->sk); 1363 ret = vsock_do_ioctl(sock, cmd, (int __user *)arg); 1364 release_sock(sock->sk); 1365 1366 return ret; 1367 } 1368 1369 static const struct proto_ops vsock_dgram_ops = { 1370 .family = PF_VSOCK, 1371 .owner = THIS_MODULE, 1372 .release = vsock_release, 1373 .bind = vsock_bind, 1374 .connect = vsock_dgram_connect, 1375 .socketpair = sock_no_socketpair, 1376 .accept = sock_no_accept, 1377 .getname = vsock_getname, 1378 .poll = vsock_poll, 1379 .ioctl = vsock_ioctl, 1380 .listen = sock_no_listen, 1381 .shutdown = vsock_shutdown, 1382 .sendmsg = vsock_dgram_sendmsg, 1383 .recvmsg = vsock_dgram_recvmsg, 1384 .mmap = sock_no_mmap, 1385 .read_skb = vsock_read_skb, 1386 }; 1387 1388 static int vsock_transport_cancel_pkt(struct vsock_sock *vsk) 1389 { 1390 const struct vsock_transport *transport = vsk->transport; 1391 1392 if (!transport || !transport->cancel_pkt) 1393 return -EOPNOTSUPP; 1394 1395 return transport->cancel_pkt(vsk); 1396 } 1397 1398 static void vsock_connect_timeout(struct work_struct *work) 1399 { 1400 struct sock *sk; 1401 struct vsock_sock *vsk; 1402 1403 vsk = container_of(work, struct vsock_sock, connect_work.work); 1404 sk = sk_vsock(vsk); 1405 1406 lock_sock(sk); 1407 if (sk->sk_state == TCP_SYN_SENT && 1408 (sk->sk_shutdown != SHUTDOWN_MASK)) { 1409 sk->sk_state = TCP_CLOSE; 1410 sk->sk_socket->state = SS_UNCONNECTED; 1411 sk->sk_err = ETIMEDOUT; 1412 sk_error_report(sk); 1413 vsock_transport_cancel_pkt(vsk); 1414 } 1415 release_sock(sk); 1416 1417 sock_put(sk); 1418 } 1419 1420 static int vsock_connect(struct socket *sock, struct sockaddr *addr, 1421 int addr_len, int flags) 1422 { 1423 int err; 1424 struct sock *sk; 1425 struct vsock_sock *vsk; 1426 const struct vsock_transport *transport; 1427 struct sockaddr_vm *remote_addr; 1428 long timeout; 1429 
DEFINE_WAIT(wait); 1430 1431 err = 0; 1432 sk = sock->sk; 1433 vsk = vsock_sk(sk); 1434 1435 lock_sock(sk); 1436 1437 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */ 1438 switch (sock->state) { 1439 case SS_CONNECTED: 1440 err = -EISCONN; 1441 goto out; 1442 case SS_DISCONNECTING: 1443 err = -EINVAL; 1444 goto out; 1445 case SS_CONNECTING: 1446 /* This continues on so we can move sock into the SS_CONNECTED 1447 * state once the connection has completed (at which point err 1448 * will be set to zero also). Otherwise, we will either wait 1449 * for the connection or return -EALREADY should this be a 1450 * non-blocking call. 1451 */ 1452 err = -EALREADY; 1453 if (flags & O_NONBLOCK) 1454 goto out; 1455 break; 1456 default: 1457 if ((sk->sk_state == TCP_LISTEN) || 1458 vsock_addr_cast(addr, addr_len, &remote_addr) != 0) { 1459 err = -EINVAL; 1460 goto out; 1461 } 1462 1463 /* Set the remote address that we are connecting to. */ 1464 memcpy(&vsk->remote_addr, remote_addr, 1465 sizeof(vsk->remote_addr)); 1466 1467 err = vsock_assign_transport(vsk, NULL); 1468 if (err) 1469 goto out; 1470 1471 transport = vsk->transport; 1472 1473 /* The hypervisor and well-known contexts do not have socket 1474 * endpoints. 1475 */ 1476 if (!transport || 1477 !transport->stream_allow(remote_addr->svm_cid, 1478 remote_addr->svm_port)) { 1479 err = -ENETUNREACH; 1480 goto out; 1481 } 1482 1483 if (vsock_msgzerocopy_allow(transport)) { 1484 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 1485 } else if (sock_flag(sk, SOCK_ZEROCOPY)) { 1486 /* If this option was set before 'connect()', 1487 * when transport was unknown, check that this 1488 * feature is supported here. 1489 */ 1490 err = -EOPNOTSUPP; 1491 goto out; 1492 } 1493 1494 err = vsock_auto_bind(vsk); 1495 if (err) 1496 goto out; 1497 1498 sk->sk_state = TCP_SYN_SENT; 1499 1500 err = transport->connect(vsk); 1501 if (err < 0) 1502 goto out; 1503 1504 /* Mark sock as connecting and set the error code to in 1505 * progress in case this is a non-blocking connect. 1506 */ 1507 sock->state = SS_CONNECTING; 1508 err = -EINPROGRESS; 1509 } 1510 1511 /* The receive path will handle all communication until we are able to 1512 * enter the connected state. Here we wait for the connection to be 1513 * completed or a notification of an error. 1514 */ 1515 timeout = vsk->connect_timeout; 1516 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1517 1518 while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) { 1519 if (flags & O_NONBLOCK) { 1520 /* If we're not going to block, we schedule a timeout 1521 * function to generate a timeout on the connection 1522 * attempt, in case the peer doesn't respond in a 1523 * timely manner. We hold on to the socket until the 1524 * timeout fires. 1525 */ 1526 sock_hold(sk); 1527 1528 /* If the timeout function is already scheduled, 1529 * reschedule it, then ungrab the socket refcount to 1530 * keep it balanced. 1531 */ 1532 if (mod_delayed_work(system_wq, &vsk->connect_work, 1533 timeout)) 1534 sock_put(sk); 1535 1536 /* Skip ahead to preserve error code set above. */ 1537 goto out_wait; 1538 } 1539 1540 release_sock(sk); 1541 timeout = schedule_timeout(timeout); 1542 lock_sock(sk); 1543 1544 if (signal_pending(current)) { 1545 err = sock_intr_errno(timeout); 1546 sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? 
TCP_CLOSING : TCP_CLOSE; 1547 sock->state = SS_UNCONNECTED; 1548 vsock_transport_cancel_pkt(vsk); 1549 vsock_remove_connected(vsk); 1550 goto out_wait; 1551 } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) { 1552 err = -ETIMEDOUT; 1553 sk->sk_state = TCP_CLOSE; 1554 sock->state = SS_UNCONNECTED; 1555 vsock_transport_cancel_pkt(vsk); 1556 goto out_wait; 1557 } 1558 1559 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1560 } 1561 1562 if (sk->sk_err) { 1563 err = -sk->sk_err; 1564 sk->sk_state = TCP_CLOSE; 1565 sock->state = SS_UNCONNECTED; 1566 } else { 1567 err = 0; 1568 } 1569 1570 out_wait: 1571 finish_wait(sk_sleep(sk), &wait); 1572 out: 1573 release_sock(sk); 1574 return err; 1575 } 1576 1577 static int vsock_accept(struct socket *sock, struct socket *newsock, 1578 struct proto_accept_arg *arg) 1579 { 1580 struct sock *listener; 1581 int err; 1582 struct sock *connected; 1583 struct vsock_sock *vconnected; 1584 long timeout; 1585 DEFINE_WAIT(wait); 1586 1587 err = 0; 1588 listener = sock->sk; 1589 1590 lock_sock(listener); 1591 1592 if (!sock_type_connectible(sock->type)) { 1593 err = -EOPNOTSUPP; 1594 goto out; 1595 } 1596 1597 if (listener->sk_state != TCP_LISTEN) { 1598 err = -EINVAL; 1599 goto out; 1600 } 1601 1602 /* Wait for children sockets to appear; these are the new sockets 1603 * created upon connection establishment. 1604 */ 1605 timeout = sock_rcvtimeo(listener, arg->flags & O_NONBLOCK); 1606 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); 1607 1608 while ((connected = vsock_dequeue_accept(listener)) == NULL && 1609 listener->sk_err == 0) { 1610 release_sock(listener); 1611 timeout = schedule_timeout(timeout); 1612 finish_wait(sk_sleep(listener), &wait); 1613 lock_sock(listener); 1614 1615 if (signal_pending(current)) { 1616 err = sock_intr_errno(timeout); 1617 goto out; 1618 } else if (timeout == 0) { 1619 err = -EAGAIN; 1620 goto out; 1621 } 1622 1623 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); 1624 } 1625 finish_wait(sk_sleep(listener), &wait); 1626 1627 if (listener->sk_err) 1628 err = -listener->sk_err; 1629 1630 if (connected) { 1631 sk_acceptq_removed(listener); 1632 1633 lock_sock_nested(connected, SINGLE_DEPTH_NESTING); 1634 vconnected = vsock_sk(connected); 1635 1636 /* If the listener socket has received an error, then we should 1637 * reject this socket and return. Note that we simply mark the 1638 * socket rejected, drop our reference, and let the cleanup 1639 * function handle the cleanup; the fact that we found it in 1640 * the listener's accept queue guarantees that the cleanup 1641 * function hasn't run yet. 
1642 */ 1643 if (err) { 1644 vconnected->rejected = true; 1645 } else { 1646 newsock->state = SS_CONNECTED; 1647 sock_graft(connected, newsock); 1648 if (vsock_msgzerocopy_allow(vconnected->transport)) 1649 set_bit(SOCK_SUPPORT_ZC, 1650 &connected->sk_socket->flags); 1651 } 1652 1653 release_sock(connected); 1654 sock_put(connected); 1655 } 1656 1657 out: 1658 release_sock(listener); 1659 return err; 1660 } 1661 1662 static int vsock_listen(struct socket *sock, int backlog) 1663 { 1664 int err; 1665 struct sock *sk; 1666 struct vsock_sock *vsk; 1667 1668 sk = sock->sk; 1669 1670 lock_sock(sk); 1671 1672 if (!sock_type_connectible(sk->sk_type)) { 1673 err = -EOPNOTSUPP; 1674 goto out; 1675 } 1676 1677 if (sock->state != SS_UNCONNECTED) { 1678 err = -EINVAL; 1679 goto out; 1680 } 1681 1682 vsk = vsock_sk(sk); 1683 1684 if (!vsock_addr_bound(&vsk->local_addr)) { 1685 err = -EINVAL; 1686 goto out; 1687 } 1688 1689 sk->sk_max_ack_backlog = backlog; 1690 sk->sk_state = TCP_LISTEN; 1691 1692 err = 0; 1693 1694 out: 1695 release_sock(sk); 1696 return err; 1697 } 1698 1699 static void vsock_update_buffer_size(struct vsock_sock *vsk, 1700 const struct vsock_transport *transport, 1701 u64 val) 1702 { 1703 if (val > vsk->buffer_max_size) 1704 val = vsk->buffer_max_size; 1705 1706 if (val < vsk->buffer_min_size) 1707 val = vsk->buffer_min_size; 1708 1709 if (val != vsk->buffer_size && 1710 transport && transport->notify_buffer_size) 1711 transport->notify_buffer_size(vsk, &val); 1712 1713 vsk->buffer_size = val; 1714 } 1715 1716 static int vsock_connectible_setsockopt(struct socket *sock, 1717 int level, 1718 int optname, 1719 sockptr_t optval, 1720 unsigned int optlen) 1721 { 1722 int err; 1723 struct sock *sk; 1724 struct vsock_sock *vsk; 1725 const struct vsock_transport *transport; 1726 u64 val; 1727 1728 if (level != AF_VSOCK && level != SOL_SOCKET) 1729 return -ENOPROTOOPT; 1730 1731 #define COPY_IN(_v) \ 1732 do { \ 1733 if (optlen < sizeof(_v)) { \ 1734 err = -EINVAL; \ 1735 goto exit; \ 1736 } \ 1737 if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) { \ 1738 err = -EFAULT; \ 1739 goto exit; \ 1740 } \ 1741 } while (0) 1742 1743 err = 0; 1744 sk = sock->sk; 1745 vsk = vsock_sk(sk); 1746 1747 lock_sock(sk); 1748 1749 transport = vsk->transport; 1750 1751 if (level == SOL_SOCKET) { 1752 int zerocopy; 1753 1754 if (optname != SO_ZEROCOPY) { 1755 release_sock(sk); 1756 return sock_setsockopt(sock, level, optname, optval, optlen); 1757 } 1758 1759 /* Use 'int' type here, because variable to 1760 * set this option usually has this type. 
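 *
 * (Illustrative only: the matching userspace call is plain SOL_SOCKET
 * SO_ZEROCOPY with an int flag, e.g., assuming fd is a not-yet-connected
 * AF_VSOCK stream socket:
 *
 *     int one = 1;
 *
 *     setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 *
 * The transport check for MSG_ZEROCOPY support is repeated at connect()
 * time, since the transport may still be unknown here.
 * )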
1761 */ 1762 COPY_IN(zerocopy); 1763 1764 if (zerocopy < 0 || zerocopy > 1) { 1765 err = -EINVAL; 1766 goto exit; 1767 } 1768 1769 if (transport && !vsock_msgzerocopy_allow(transport)) { 1770 err = -EOPNOTSUPP; 1771 goto exit; 1772 } 1773 1774 sock_valbool_flag(sk, SOCK_ZEROCOPY, zerocopy); 1775 goto exit; 1776 } 1777 1778 switch (optname) { 1779 case SO_VM_SOCKETS_BUFFER_SIZE: 1780 COPY_IN(val); 1781 vsock_update_buffer_size(vsk, transport, val); 1782 break; 1783 1784 case SO_VM_SOCKETS_BUFFER_MAX_SIZE: 1785 COPY_IN(val); 1786 vsk->buffer_max_size = val; 1787 vsock_update_buffer_size(vsk, transport, vsk->buffer_size); 1788 break; 1789 1790 case SO_VM_SOCKETS_BUFFER_MIN_SIZE: 1791 COPY_IN(val); 1792 vsk->buffer_min_size = val; 1793 vsock_update_buffer_size(vsk, transport, vsk->buffer_size); 1794 break; 1795 1796 case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW: 1797 case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: { 1798 struct __kernel_sock_timeval tv; 1799 1800 err = sock_copy_user_timeval(&tv, optval, optlen, 1801 optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD); 1802 if (err) 1803 break; 1804 if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC && 1805 tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) { 1806 vsk->connect_timeout = tv.tv_sec * HZ + 1807 DIV_ROUND_UP((unsigned long)tv.tv_usec, (USEC_PER_SEC / HZ)); 1808 if (vsk->connect_timeout == 0) 1809 vsk->connect_timeout = 1810 VSOCK_DEFAULT_CONNECT_TIMEOUT; 1811 1812 } else { 1813 err = -ERANGE; 1814 } 1815 break; 1816 } 1817 1818 default: 1819 err = -ENOPROTOOPT; 1820 break; 1821 } 1822 1823 #undef COPY_IN 1824 1825 exit: 1826 release_sock(sk); 1827 return err; 1828 } 1829 1830 static int vsock_connectible_getsockopt(struct socket *sock, 1831 int level, int optname, 1832 char __user *optval, 1833 int __user *optlen) 1834 { 1835 struct sock *sk = sock->sk; 1836 struct vsock_sock *vsk = vsock_sk(sk); 1837 1838 union { 1839 u64 val64; 1840 struct old_timeval32 tm32; 1841 struct __kernel_old_timeval tm; 1842 struct __kernel_sock_timeval stm; 1843 } v; 1844 1845 int lv = sizeof(v.val64); 1846 int len; 1847 1848 if (level != AF_VSOCK) 1849 return -ENOPROTOOPT; 1850 1851 if (get_user(len, optlen)) 1852 return -EFAULT; 1853 1854 memset(&v, 0, sizeof(v)); 1855 1856 switch (optname) { 1857 case SO_VM_SOCKETS_BUFFER_SIZE: 1858 v.val64 = vsk->buffer_size; 1859 break; 1860 1861 case SO_VM_SOCKETS_BUFFER_MAX_SIZE: 1862 v.val64 = vsk->buffer_max_size; 1863 break; 1864 1865 case SO_VM_SOCKETS_BUFFER_MIN_SIZE: 1866 v.val64 = vsk->buffer_min_size; 1867 break; 1868 1869 case SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW: 1870 case SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD: 1871 lv = sock_get_timeout(vsk->connect_timeout, &v, 1872 optname == SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD); 1873 break; 1874 1875 default: 1876 return -ENOPROTOOPT; 1877 } 1878 1879 if (len < lv) 1880 return -EINVAL; 1881 if (len > lv) 1882 len = lv; 1883 if (copy_to_user(optval, &v, len)) 1884 return -EFAULT; 1885 1886 if (put_user(len, optlen)) 1887 return -EFAULT; 1888 1889 return 0; 1890 } 1891 1892 static int vsock_connectible_sendmsg(struct socket *sock, struct msghdr *msg, 1893 size_t len) 1894 { 1895 struct sock *sk; 1896 struct vsock_sock *vsk; 1897 const struct vsock_transport *transport; 1898 ssize_t total_written; 1899 long timeout; 1900 int err; 1901 struct vsock_transport_send_notify_data send_data; 1902 DEFINE_WAIT_FUNC(wait, woken_wake_function); 1903 1904 sk = sock->sk; 1905 vsk = vsock_sk(sk); 1906 total_written = 0; 1907 err = 0; 1908 1909 if (msg->msg_flags & MSG_OOB) 1910 return -EOPNOTSUPP; 1911 1912 
lock_sock(sk); 1913 1914 transport = vsk->transport; 1915 1916 /* Callers should not provide a destination with connection oriented 1917 * sockets. 1918 */ 1919 if (msg->msg_namelen) { 1920 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; 1921 goto out; 1922 } 1923 1924 /* Send data only if both sides are not shutdown in the direction. */ 1925 if (sk->sk_shutdown & SEND_SHUTDOWN || 1926 vsk->peer_shutdown & RCV_SHUTDOWN) { 1927 err = -EPIPE; 1928 goto out; 1929 } 1930 1931 if (!transport || sk->sk_state != TCP_ESTABLISHED || 1932 !vsock_addr_bound(&vsk->local_addr)) { 1933 err = -ENOTCONN; 1934 goto out; 1935 } 1936 1937 if (!vsock_addr_bound(&vsk->remote_addr)) { 1938 err = -EDESTADDRREQ; 1939 goto out; 1940 } 1941 1942 if (msg->msg_flags & MSG_ZEROCOPY && 1943 !vsock_msgzerocopy_allow(transport)) { 1944 err = -EOPNOTSUPP; 1945 goto out; 1946 } 1947 1948 /* Wait for room in the produce queue to enqueue our user's data. */ 1949 timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1950 1951 err = transport->notify_send_init(vsk, &send_data); 1952 if (err < 0) 1953 goto out; 1954 1955 while (total_written < len) { 1956 ssize_t written; 1957 1958 add_wait_queue(sk_sleep(sk), &wait); 1959 while (vsock_stream_has_space(vsk) == 0 && 1960 sk->sk_err == 0 && 1961 !(sk->sk_shutdown & SEND_SHUTDOWN) && 1962 !(vsk->peer_shutdown & RCV_SHUTDOWN)) { 1963 1964 /* Don't wait for non-blocking sockets. */ 1965 if (timeout == 0) { 1966 err = -EAGAIN; 1967 remove_wait_queue(sk_sleep(sk), &wait); 1968 goto out_err; 1969 } 1970 1971 err = transport->notify_send_pre_block(vsk, &send_data); 1972 if (err < 0) { 1973 remove_wait_queue(sk_sleep(sk), &wait); 1974 goto out_err; 1975 } 1976 1977 release_sock(sk); 1978 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout); 1979 lock_sock(sk); 1980 if (signal_pending(current)) { 1981 err = sock_intr_errno(timeout); 1982 remove_wait_queue(sk_sleep(sk), &wait); 1983 goto out_err; 1984 } else if (timeout == 0) { 1985 err = -EAGAIN; 1986 remove_wait_queue(sk_sleep(sk), &wait); 1987 goto out_err; 1988 } 1989 } 1990 remove_wait_queue(sk_sleep(sk), &wait); 1991 1992 /* These checks occur both as part of and after the loop 1993 * conditional since we need to check before and after 1994 * sleeping. 1995 */ 1996 if (sk->sk_err) { 1997 err = -sk->sk_err; 1998 goto out_err; 1999 } else if ((sk->sk_shutdown & SEND_SHUTDOWN) || 2000 (vsk->peer_shutdown & RCV_SHUTDOWN)) { 2001 err = -EPIPE; 2002 goto out_err; 2003 } 2004 2005 err = transport->notify_send_pre_enqueue(vsk, &send_data); 2006 if (err < 0) 2007 goto out_err; 2008 2009 /* Note that enqueue will only write as many bytes as are free 2010 * in the produce queue, so we don't need to ensure len is 2011 * smaller than the queue size. It is the caller's 2012 * responsibility to check how many bytes we were able to send. 2013 */ 2014 2015 if (sk->sk_type == SOCK_SEQPACKET) { 2016 written = transport->seqpacket_enqueue(vsk, 2017 msg, len - total_written); 2018 } else { 2019 written = transport->stream_enqueue(vsk, 2020 msg, len - total_written); 2021 } 2022 2023 if (written < 0) { 2024 err = written; 2025 goto out_err; 2026 } 2027 2028 total_written += written; 2029 2030 err = transport->notify_send_post_enqueue( 2031 vsk, written, &send_data); 2032 if (err < 0) 2033 goto out_err; 2034 2035 } 2036 2037 out_err: 2038 if (total_written > 0) { 2039 /* Return number of written bytes only if: 2040 * 1) SOCK_STREAM socket. 2041 * 2) SOCK_SEQPACKET socket when whole buffer is sent. 
2042 */ 2043 if (sk->sk_type == SOCK_STREAM || total_written == len) 2044 err = total_written; 2045 } 2046 out: 2047 if (sk->sk_type == SOCK_STREAM) 2048 err = sk_stream_error(sk, msg->msg_flags, err); 2049 2050 release_sock(sk); 2051 return err; 2052 } 2053 2054 static int vsock_connectible_wait_data(struct sock *sk, 2055 struct wait_queue_entry *wait, 2056 long timeout, 2057 struct vsock_transport_recv_notify_data *recv_data, 2058 size_t target) 2059 { 2060 const struct vsock_transport *transport; 2061 struct vsock_sock *vsk; 2062 s64 data; 2063 int err; 2064 2065 vsk = vsock_sk(sk); 2066 err = 0; 2067 transport = vsk->transport; 2068 2069 while (1) { 2070 prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE); 2071 data = vsock_connectible_has_data(vsk); 2072 if (data != 0) 2073 break; 2074 2075 if (sk->sk_err != 0 || 2076 (sk->sk_shutdown & RCV_SHUTDOWN) || 2077 (vsk->peer_shutdown & SEND_SHUTDOWN)) { 2078 break; 2079 } 2080 2081 /* Don't wait for non-blocking sockets. */ 2082 if (timeout == 0) { 2083 err = -EAGAIN; 2084 break; 2085 } 2086 2087 if (recv_data) { 2088 err = transport->notify_recv_pre_block(vsk, target, recv_data); 2089 if (err < 0) 2090 break; 2091 } 2092 2093 release_sock(sk); 2094 timeout = schedule_timeout(timeout); 2095 lock_sock(sk); 2096 2097 if (signal_pending(current)) { 2098 err = sock_intr_errno(timeout); 2099 break; 2100 } else if (timeout == 0) { 2101 err = -EAGAIN; 2102 break; 2103 } 2104 } 2105 2106 finish_wait(sk_sleep(sk), wait); 2107 2108 if (err) 2109 return err; 2110 2111 /* Internal transport error when checking for available 2112 * data. XXX This should be changed to a connection 2113 * reset in a later change. 2114 */ 2115 if (data < 0) 2116 return -ENOMEM; 2117 2118 return data; 2119 } 2120 2121 static int __vsock_stream_recvmsg(struct sock *sk, struct msghdr *msg, 2122 size_t len, int flags) 2123 { 2124 struct vsock_transport_recv_notify_data recv_data; 2125 const struct vsock_transport *transport; 2126 struct vsock_sock *vsk; 2127 ssize_t copied; 2128 size_t target; 2129 long timeout; 2130 int err; 2131 2132 DEFINE_WAIT(wait); 2133 2134 vsk = vsock_sk(sk); 2135 transport = vsk->transport; 2136 2137 /* We must not copy less than target bytes into the user's buffer 2138 * before returning successfully, so we wait for the consume queue to 2139 * have that much data to consume before dequeueing. Note that this 2140 * makes it impossible to handle cases where target is greater than the 2141 * queue size. 
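 * A target at or above the transport's receive high-water mark is therefore
 * rejected below with -ENOMEM rather than waiting forever.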
2142 */ 2143 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 2144 if (target >= transport->stream_rcvhiwat(vsk)) { 2145 err = -ENOMEM; 2146 goto out; 2147 } 2148 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2149 copied = 0; 2150 2151 err = transport->notify_recv_init(vsk, target, &recv_data); 2152 if (err < 0) 2153 goto out; 2154 2155 2156 while (1) { 2157 ssize_t read; 2158 2159 err = vsock_connectible_wait_data(sk, &wait, timeout, 2160 &recv_data, target); 2161 if (err <= 0) 2162 break; 2163 2164 err = transport->notify_recv_pre_dequeue(vsk, target, 2165 &recv_data); 2166 if (err < 0) 2167 break; 2168 2169 read = transport->stream_dequeue(vsk, msg, len - copied, flags); 2170 if (read < 0) { 2171 err = read; 2172 break; 2173 } 2174 2175 copied += read; 2176 2177 err = transport->notify_recv_post_dequeue(vsk, target, read, 2178 !(flags & MSG_PEEK), &recv_data); 2179 if (err < 0) 2180 goto out; 2181 2182 if (read >= target || flags & MSG_PEEK) 2183 break; 2184 2185 target -= read; 2186 } 2187 2188 if (sk->sk_err) 2189 err = -sk->sk_err; 2190 else if (sk->sk_shutdown & RCV_SHUTDOWN) 2191 err = 0; 2192 2193 if (copied > 0) 2194 err = copied; 2195 2196 out: 2197 return err; 2198 } 2199 2200 static int __vsock_seqpacket_recvmsg(struct sock *sk, struct msghdr *msg, 2201 size_t len, int flags) 2202 { 2203 const struct vsock_transport *transport; 2204 struct vsock_sock *vsk; 2205 ssize_t msg_len; 2206 long timeout; 2207 int err = 0; 2208 DEFINE_WAIT(wait); 2209 2210 vsk = vsock_sk(sk); 2211 transport = vsk->transport; 2212 2213 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2214 2215 err = vsock_connectible_wait_data(sk, &wait, timeout, NULL, 0); 2216 if (err <= 0) 2217 goto out; 2218 2219 msg_len = transport->seqpacket_dequeue(vsk, msg, flags); 2220 2221 if (msg_len < 0) { 2222 err = msg_len; 2223 goto out; 2224 } 2225 2226 if (sk->sk_err) { 2227 err = -sk->sk_err; 2228 } else if (sk->sk_shutdown & RCV_SHUTDOWN) { 2229 err = 0; 2230 } else { 2231 /* User sets MSG_TRUNC, so return real length of 2232 * packet. 2233 */ 2234 if (flags & MSG_TRUNC) 2235 err = msg_len; 2236 else 2237 err = len - msg_data_left(msg); 2238 2239 /* Always set MSG_TRUNC if real length of packet is 2240 * bigger than user's buffer. 2241 */ 2242 if (msg_len > len) 2243 msg->msg_flags |= MSG_TRUNC; 2244 } 2245 2246 out: 2247 return err; 2248 } 2249 2250 int 2251 __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 2252 int flags) 2253 { 2254 struct sock *sk; 2255 struct vsock_sock *vsk; 2256 const struct vsock_transport *transport; 2257 int err; 2258 2259 sk = sock->sk; 2260 2261 if (unlikely(flags & MSG_ERRQUEUE)) 2262 return sock_recv_errqueue(sk, msg, len, SOL_VSOCK, VSOCK_RECVERR); 2263 2264 vsk = vsock_sk(sk); 2265 err = 0; 2266 2267 lock_sock(sk); 2268 2269 transport = vsk->transport; 2270 2271 if (!transport || sk->sk_state != TCP_ESTABLISHED) { 2272 /* Recvmsg is supposed to return 0 if a peer performs an 2273 * orderly shutdown. Differentiate between that case and when a 2274 * peer has not connected or a local shutdown occurred with the 2275 * SOCK_DONE flag. 2276 */ 2277 if (sock_flag(sk, SOCK_DONE)) 2278 err = 0; 2279 else 2280 err = -ENOTCONN; 2281 2282 goto out; 2283 } 2284 2285 if (flags & MSG_OOB) { 2286 err = -EOPNOTSUPP; 2287 goto out; 2288 } 2289 2290 /* We don't check peer_shutdown flag here since peer may actually shut 2291 * down, but there can be data in the queue that a local socket can 2292 * receive. 
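 * A local RCV_SHUTDOWN, checked just below, does end the receive with 0,
 * since this side has explicitly stopped reading.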
2293 */ 2294 if (sk->sk_shutdown & RCV_SHUTDOWN) { 2295 err = 0; 2296 goto out; 2297 } 2298 2299 /* It is valid on Linux to pass in a zero-length receive buffer. This 2300 * is not an error. We may as well bail out now. 2301 */ 2302 if (!len) { 2303 err = 0; 2304 goto out; 2305 } 2306 2307 if (sk->sk_type == SOCK_STREAM) 2308 err = __vsock_stream_recvmsg(sk, msg, len, flags); 2309 else 2310 err = __vsock_seqpacket_recvmsg(sk, msg, len, flags); 2311 2312 out: 2313 release_sock(sk); 2314 return err; 2315 } 2316 2317 int 2318 vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 2319 int flags) 2320 { 2321 #ifdef CONFIG_BPF_SYSCALL 2322 struct sock *sk = sock->sk; 2323 const struct proto *prot; 2324 2325 prot = READ_ONCE(sk->sk_prot); 2326 if (prot != &vsock_proto) 2327 return prot->recvmsg(sk, msg, len, flags, NULL); 2328 #endif 2329 2330 return __vsock_connectible_recvmsg(sock, msg, len, flags); 2331 } 2332 EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg); 2333 2334 static int vsock_set_rcvlowat(struct sock *sk, int val) 2335 { 2336 const struct vsock_transport *transport; 2337 struct vsock_sock *vsk; 2338 2339 vsk = vsock_sk(sk); 2340 2341 if (val > vsk->buffer_size) 2342 return -EINVAL; 2343 2344 transport = vsk->transport; 2345 2346 if (transport && transport->notify_set_rcvlowat) { 2347 int err; 2348 2349 err = transport->notify_set_rcvlowat(vsk, val); 2350 if (err) 2351 return err; 2352 } 2353 2354 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); 2355 return 0; 2356 } 2357 2358 static const struct proto_ops vsock_stream_ops = { 2359 .family = PF_VSOCK, 2360 .owner = THIS_MODULE, 2361 .release = vsock_release, 2362 .bind = vsock_bind, 2363 .connect = vsock_connect, 2364 .socketpair = sock_no_socketpair, 2365 .accept = vsock_accept, 2366 .getname = vsock_getname, 2367 .poll = vsock_poll, 2368 .ioctl = vsock_ioctl, 2369 .listen = vsock_listen, 2370 .shutdown = vsock_shutdown, 2371 .setsockopt = vsock_connectible_setsockopt, 2372 .getsockopt = vsock_connectible_getsockopt, 2373 .sendmsg = vsock_connectible_sendmsg, 2374 .recvmsg = vsock_connectible_recvmsg, 2375 .mmap = sock_no_mmap, 2376 .set_rcvlowat = vsock_set_rcvlowat, 2377 .read_skb = vsock_read_skb, 2378 }; 2379 2380 static const struct proto_ops vsock_seqpacket_ops = { 2381 .family = PF_VSOCK, 2382 .owner = THIS_MODULE, 2383 .release = vsock_release, 2384 .bind = vsock_bind, 2385 .connect = vsock_connect, 2386 .socketpair = sock_no_socketpair, 2387 .accept = vsock_accept, 2388 .getname = vsock_getname, 2389 .poll = vsock_poll, 2390 .ioctl = vsock_ioctl, 2391 .listen = vsock_listen, 2392 .shutdown = vsock_shutdown, 2393 .setsockopt = vsock_connectible_setsockopt, 2394 .getsockopt = vsock_connectible_getsockopt, 2395 .sendmsg = vsock_connectible_sendmsg, 2396 .recvmsg = vsock_connectible_recvmsg, 2397 .mmap = sock_no_mmap, 2398 .read_skb = vsock_read_skb, 2399 }; 2400 2401 static int vsock_create(struct net *net, struct socket *sock, 2402 int protocol, int kern) 2403 { 2404 struct vsock_sock *vsk; 2405 struct sock *sk; 2406 int ret; 2407 2408 if (!sock) 2409 return -EINVAL; 2410 2411 if (protocol && protocol != PF_VSOCK) 2412 return -EPROTONOSUPPORT; 2413 2414 switch (sock->type) { 2415 case SOCK_DGRAM: 2416 sock->ops = &vsock_dgram_ops; 2417 break; 2418 case SOCK_STREAM: 2419 sock->ops = &vsock_stream_ops; 2420 break; 2421 case SOCK_SEQPACKET: 2422 sock->ops = &vsock_seqpacket_ops; 2423 break; 2424 default: 2425 return -ESOCKTNOSUPPORT; 2426 } 2427 2428 sock->state = SS_UNCONNECTED; 2429 2430 sk = __vsock_create(net, 
sock, NULL, GFP_KERNEL, 0, kern); 2431 if (!sk) 2432 return -ENOMEM; 2433 2434 vsk = vsock_sk(sk); 2435 2436 if (sock->type == SOCK_DGRAM) { 2437 ret = vsock_assign_transport(vsk, NULL); 2438 if (ret < 0) { 2439 sock->sk = NULL; 2440 sock_put(sk); 2441 return ret; 2442 } 2443 } 2444 2445 /* SOCK_DGRAM doesn't have 'setsockopt' callback set in its 2446 * proto_ops, so there is no handler for custom logic. 2447 */ 2448 if (sock_type_connectible(sock->type)) 2449 set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); 2450 2451 vsock_insert_unbound(vsk); 2452 2453 return 0; 2454 } 2455 2456 static const struct net_proto_family vsock_family_ops = { 2457 .family = AF_VSOCK, 2458 .create = vsock_create, 2459 .owner = THIS_MODULE, 2460 }; 2461 2462 static long vsock_dev_do_ioctl(struct file *filp, 2463 unsigned int cmd, void __user *ptr) 2464 { 2465 u32 __user *p = ptr; 2466 u32 cid = VMADDR_CID_ANY; 2467 int retval = 0; 2468 2469 switch (cmd) { 2470 case IOCTL_VM_SOCKETS_GET_LOCAL_CID: 2471 /* To be compatible with the VMCI behavior, we prioritize the 2472 * guest CID instead of well-known host CID (VMADDR_CID_HOST). 2473 */ 2474 if (transport_g2h) 2475 cid = transport_g2h->get_local_cid(); 2476 else if (transport_h2g) 2477 cid = transport_h2g->get_local_cid(); 2478 2479 if (put_user(cid, p) != 0) 2480 retval = -EFAULT; 2481 break; 2482 2483 default: 2484 retval = -ENOIOCTLCMD; 2485 } 2486 2487 return retval; 2488 } 2489 2490 static long vsock_dev_ioctl(struct file *filp, 2491 unsigned int cmd, unsigned long arg) 2492 { 2493 return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg); 2494 } 2495 2496 #ifdef CONFIG_COMPAT 2497 static long vsock_dev_compat_ioctl(struct file *filp, 2498 unsigned int cmd, unsigned long arg) 2499 { 2500 return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg)); 2501 } 2502 #endif 2503 2504 static const struct file_operations vsock_device_ops = { 2505 .owner = THIS_MODULE, 2506 .unlocked_ioctl = vsock_dev_ioctl, 2507 #ifdef CONFIG_COMPAT 2508 .compat_ioctl = vsock_dev_compat_ioctl, 2509 #endif 2510 .open = nonseekable_open, 2511 }; 2512 2513 static struct miscdevice vsock_device = { 2514 .name = "vsock", 2515 .fops = &vsock_device_ops, 2516 }; 2517 2518 static int __init vsock_init(void) 2519 { 2520 int err = 0; 2521 2522 vsock_init_tables(); 2523 2524 vsock_proto.owner = THIS_MODULE; 2525 vsock_device.minor = MISC_DYNAMIC_MINOR; 2526 err = misc_register(&vsock_device); 2527 if (err) { 2528 pr_err("Failed to register misc device\n"); 2529 goto err_reset_transport; 2530 } 2531 2532 err = proto_register(&vsock_proto, 1); /* we want our slab */ 2533 if (err) { 2534 pr_err("Cannot register vsock protocol\n"); 2535 goto err_deregister_misc; 2536 } 2537 2538 err = sock_register(&vsock_family_ops); 2539 if (err) { 2540 pr_err("could not register af_vsock (%d) address family: %d\n", 2541 AF_VSOCK, err); 2542 goto err_unregister_proto; 2543 } 2544 2545 vsock_bpf_build_proto(); 2546 2547 return 0; 2548 2549 err_unregister_proto: 2550 proto_unregister(&vsock_proto); 2551 err_deregister_misc: 2552 misc_deregister(&vsock_device); 2553 err_reset_transport: 2554 return err; 2555 } 2556 2557 static void __exit vsock_exit(void) 2558 { 2559 misc_deregister(&vsock_device); 2560 sock_unregister(AF_VSOCK); 2561 proto_unregister(&vsock_proto); 2562 } 2563 2564 const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk) 2565 { 2566 return vsk->transport; 2567 } 2568 EXPORT_SYMBOL_GPL(vsock_core_get_transport); 2569 2570 int vsock_core_register(const struct vsock_transport *t, int
features) 2571 { 2572 const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local; 2573 int err = mutex_lock_interruptible(&vsock_register_mutex); 2574 2575 if (err) 2576 return err; 2577 2578 t_h2g = transport_h2g; 2579 t_g2h = transport_g2h; 2580 t_dgram = transport_dgram; 2581 t_local = transport_local; 2582 2583 if (features & VSOCK_TRANSPORT_F_H2G) { 2584 if (t_h2g) { 2585 err = -EBUSY; 2586 goto err_busy; 2587 } 2588 t_h2g = t; 2589 } 2590 2591 if (features & VSOCK_TRANSPORT_F_G2H) { 2592 if (t_g2h) { 2593 err = -EBUSY; 2594 goto err_busy; 2595 } 2596 t_g2h = t; 2597 } 2598 2599 if (features & VSOCK_TRANSPORT_F_DGRAM) { 2600 if (t_dgram) { 2601 err = -EBUSY; 2602 goto err_busy; 2603 } 2604 t_dgram = t; 2605 } 2606 2607 if (features & VSOCK_TRANSPORT_F_LOCAL) { 2608 if (t_local) { 2609 err = -EBUSY; 2610 goto err_busy; 2611 } 2612 t_local = t; 2613 } 2614 2615 transport_h2g = t_h2g; 2616 transport_g2h = t_g2h; 2617 transport_dgram = t_dgram; 2618 transport_local = t_local; 2619 2620 err_busy: 2621 mutex_unlock(&vsock_register_mutex); 2622 return err; 2623 } 2624 EXPORT_SYMBOL_GPL(vsock_core_register); 2625 2626 void vsock_core_unregister(const struct vsock_transport *t) 2627 { 2628 mutex_lock(&vsock_register_mutex); 2629 2630 if (transport_h2g == t) 2631 transport_h2g = NULL; 2632 2633 if (transport_g2h == t) 2634 transport_g2h = NULL; 2635 2636 if (transport_dgram == t) 2637 transport_dgram = NULL; 2638 2639 if (transport_local == t) 2640 transport_local = NULL; 2641 2642 mutex_unlock(&vsock_register_mutex); 2643 } 2644 EXPORT_SYMBOL_GPL(vsock_core_unregister); 2645 2646 module_init(vsock_init); 2647 module_exit(vsock_exit); 2648 2649 MODULE_AUTHOR("VMware, Inc."); 2650 MODULE_DESCRIPTION("VMware Virtual Socket Family"); 2651 MODULE_VERSION("1.0.2.0-k"); 2652 MODULE_LICENSE("GPL v2"); 2653
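/* Registration sketch (illustrative only, not compiled as part of this file):
 * a hypothetical transport module claiming the host-to-guest (H2G) slot would
 * fill in a struct vsock_transport and register/unregister it from its module
 * init/exit hooks. The names "my_vsock_transport", "my_vsock_init" and
 * "my_vsock_exit" are made up for this sketch; only vsock_core_register(),
 * vsock_core_unregister() and the VSOCK_TRANSPORT_F_H2G flag are real vsock
 * core interfaces.
 *
 *	static struct vsock_transport my_vsock_transport = {
 *		.module = THIS_MODULE,
 *		// ... transport callbacks (init, destruct, connect, ...) ...
 *	};
 *
 *	static int __init my_vsock_init(void)
 *	{
 *		return vsock_core_register(&my_vsock_transport,
 *					   VSOCK_TRANSPORT_F_H2G);
 *	}
 *
 *	static void __exit my_vsock_exit(void)
 *	{
 *		vsock_core_unregister(&my_vsock_transport);
 *	}
 */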